diff --git a/.all-contributorsrc b/.all-contributorsrc index c927666bb1a9..eac27828eed2 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -796,6 +796,24 @@ "contributions": [ "code" ] + }, + { + "login": "scrocquesel", + "name": "Sébastien Crocquesel", + "avatar_url": "https://avatars.githubusercontent.com/u/88554524?v=4", + "profile": "https://www.inulogic.fr", + "contributions": [ + "code" + ] + }, + { + "login": "dave-fn", + "name": "David Negrete", + "avatar_url": "https://avatars.githubusercontent.com/u/21349334?v=4", + "profile": "https://github.com/dave-fn", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, @@ -804,5 +822,6 @@ "repoType": "github", "repoHost": "https://github.com", "skipCi": true, - "commitConvention": "angular" + "commitConvention": "angular", + "commitType": "docs" } diff --git a/.brazil.json b/.brazil.json index 9059e5646031..1f0931d0747b 100644 --- a/.brazil.json +++ b/.brazil.json @@ -109,6 +109,7 @@ "io.netty:netty-common": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-handler": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-resolver": { "packageName": "Netty4", "packageVersion": "4.1" }, + "io.netty:netty-resolver-dns": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-transport": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-transport-classes-epoll": { "packageName": "Netty4", "packageVersion": "4.1" }, "io.netty:netty-transport-native-unix-common": { "packageName": "Netty4", "packageVersion": "4.1" }, diff --git a/.changes/2.20.83.json b/.changes/2.20.83.json new file mode 100644 index 000000000000..42dffd691342 --- /dev/null +++ b/.changes/2.20.83.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.83", + "date": "2023-06-09", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed issue with leased connection leaks when threads executing 
HTTP connections with Apache HttpClient were interrupted while the connection was in progress." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "contributor": "martinKindall", + "description": "By default, Netty threads are blocked during dns resolution, namely InetAddress.getByName is used under the hood. Now, there's an option to configure the NettyNioAsyncHttpClient in order to use a non blocking dns resolution strategy." + }, + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "contributor": "", + "description": "Document-only update to refresh CLI documentation for AWS Private CA. No change to the service." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.84.json b/.changes/2.20.84.json new file mode 100644 index 000000000000..297e632d6fb7 --- /dev/null +++ b/.changes/2.20.84.json @@ -0,0 +1,66 @@ +{ + "version": "2.20.84", + "date": "2023-06-12", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify UI Builder", + "contributor": "", + "description": "AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "`IdleConnectionReaper` now does not prevent `HttpClientConnectionManager` from getting GC'd in the case where an SDK client is created per request and not closed." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "StephenFlavin", + "description": "Add \"unsafe\" and \"fromRemaining\" AsyncRequestBody constructors for byte arrays and ByteBuffers" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Documentation updates for DynamoDB" + }, + { + "type": "feature", + "category": "Amazon DynamoDB Streams", + "contributor": "", + "description": "Documentation updates for DynamoDB Streams" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created." + }, + { + "type": "feature", + "category": "Amazon OpenSearch Service", + "contributor": "", + "description": "This release adds support for SkipUnavailable connection property for cross cluster search" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. With these devices, you can run your workloads at highest performance with lowest cost. inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.85.json b/.changes/2.20.85.json new file mode 100644 index 000000000000..ce7650672d7d --- /dev/null +++ b/.changes/2.20.85.json @@ -0,0 +1,84 @@ +{ + "version": "2.20.85", + "date": "2023-06-13", + "entries": [ + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "This feature allows users to view dashboards for CloudTrail Lake event data stores." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Add support for Security Hub Automation Rules" + }, + { + "type": "feature", + "category": "AWS SimSpace Weaver", + "contributor": "", + "description": "This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet." + }, + { + "type": "feature", + "category": "AWS Well-Architected Tool", + "contributor": "", + "description": "AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Security", + "contributor": "", + "description": "Initial release of Amazon CodeGuru Security APIs" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address." 
+ }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds pagination for the Get Certificates API operation." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Integrate double encryption feature to SDKs." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "GA release of Amazon Verified Permissions." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data." + }, + { + "type": "feature", + "category": "Elastic Disaster Recovery Service", + "contributor": "", + "description": "Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.86.json b/.changes/2.20.86.json new file mode 100644 index 000000000000..33a7fb16a578 --- /dev/null +++ b/.changes/2.20.86.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.86", + "date": "2023-06-15", + "entries": [ + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced", + "contributor": "breader124", + "description": "Thanks to this bugfix it'll be possible to create DynamoDB table containing\nsecondary indices when using no arugments `createTable` method from `DefaultDynamoDbTable`\nclass. Information about their presence might be expressed using annotations, but it was ignored\nand created tables didn't contain specified indices. Plase note that it is still not possible\nto specify projections for indices using annotations. By default, all fields will be projected." 
+ }, + { + "type": "feature", + "category": "AWS Audit Manager", + "contributor": "", + "description": "This release introduces 2 Audit Manager features: CSV exports and new manual evidence options. You can now export your evidence finder results in CSV format. In addition, you can now add manual evidence to a control by entering free-form text or uploading a file from your browser." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "contributor": "", + "description": "Documentation updates for EFS." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Updated descriptions for some APIs." + }, + { + "type": "feature", + "category": "Amazon Location Service", + "contributor": "", + "description": "Amazon Location Service adds categories to places, including filtering on those categories in searches. Also, you can now add metadata properties to your geofences." + }, + { + "type": "feature", + "category": "DynamoDB Enhanced Client", + "contributor": "bmaizels", + "description": "Add EnhancedType parameters to static builder methods of StaticTableSchema and StaticImmitableTableSchema" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.87.json b/.changes/2.20.87.json new file mode 100644 index 000000000000..78a820206e9e --- /dev/null +++ b/.changes/2.20.87.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.87", + "date": "2023-06-16", + "entries": [ + { + "type": "bugfix", + "category": "Amazon DynamoDB", + "contributor": "martinKindall", + "description": "Created static method EnumAttributeConverter::createWithNameAsKeys which creates a converter based on the Enum::name method to identify enums, rather than Enum::toString. This is preferable because Enum::name is final and cannot be overwritten, as opposed to Enum::toString. 
EnumAttributeConverter::create is kept as it is, for backward compatibility." + }, + { + "type": "feature", + "category": "AWS Account", + "contributor": "", + "description": "Improve pagination support for ListRegions" + }, + { + "type": "feature", + "category": "AWS Application Discovery Service", + "contributor": "", + "description": "Add Amazon EC2 instance recommendations export" + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "contributor": "", + "description": "Documentation updates for AWS Identity and Access Management (IAM)." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Updates the *InstanceStorageConfig APIs to support a new ResourceType: SCREEN_RECORDINGS to enable screen recording and specify the storage configurations for publishing the recordings. Also updates DescribeInstance and ListInstances APIs to include InstanceAccessUrl attribute in the API response." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "This release adds SDK support for request-payer request header and request-charged response header in the \"GetBucketAccelerateConfiguration\", \"ListMultipartUploads\", \"ListObjects\", \"ListObjectsV2\" and \"ListObjectVersions\" S3 APIs." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.88.json b/.changes/2.20.88.json new file mode 100644 index 000000000000..41dfe7f02632 --- /dev/null +++ b/.changes/2.20.88.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.88", + "date": "2023-06-19", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "flittev", + "description": "`WaiterExecutor` recursive implementation changed to iterative" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Specify desired CloudFormation behavior in the event of ChangeSet execution failure using the CreateChangeSet OnStackFailure parameter" + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release adds support for creating cross region table/database resource links" + }, + { + "type": "feature", + "category": "AWS Price List Service", + "contributor": "", + "description": "This release updates the PriceListArn regex pattern." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Documentation only update to address various tickets." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "API changes to AWS Verified Access to include data from trust providers in logs" + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "Update MaxItems upper bound to 1000 for ListPricesRequest" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon Sagemaker Autopilot releases CreateAutoMLJobV2 and DescribeAutoMLJobV2 for Autopilot customers with ImageClassification, TextClassification and Tabular problem type config support." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.89.json b/.changes/2.20.89.json new file mode 100644 index 000000000000..bc9f9f8cbf13 --- /dev/null +++ b/.changes/2.20.89.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.89", + "date": "2023-06-20", + "entries": [ + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Updated ResourceType enum with new resource types onboarded by AWS Config in May 2023." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "This release adds RecursiveInvocationException to the Invoke API and InvokeWithResponseStream API." + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "This release adds new API to reset connector metadata cache" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Adds support for targeting Dedicated Host allocations by assetIds in AWS Outposts" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "Added support for custom domain names for Redshift Provisioned clusters. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index eff26ede9c2c..28297d0d126f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,250 @@ +# __2.20.89__ __2023-06-20__ +## __AWS Config__ + - ### Features + - Updated ResourceType enum with new resource types onboarded by AWS Config in May 2023. + +## __AWS Lambda__ + - ### Features + - This release adds RecursiveInvocationException to the Invoke API and InvokeWithResponseStream API. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Appflow__ + - ### Features + - This release adds new API to reset connector metadata cache + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adds support for targeting Dedicated Host allocations by assetIds in AWS Outposts + +## __Amazon Redshift__ + - ### Features + - Added support for custom domain names for Redshift Provisioned clusters. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it. + +# __2.20.88__ __2023-06-19__ +## __AWS CloudFormation__ + - ### Features + - Specify desired CloudFormation behavior in the event of ChangeSet execution failure using the CreateChangeSet OnStackFailure parameter + +## __AWS Glue__ + - ### Features + - This release adds support for creating cross region table/database resource links + +## __AWS Price List Service__ + - ### Features + - This release updates the PriceListArn regex pattern. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - `WaiterExecutor` recursive implementation changed to iterative + - Contributed by: [@flittev](https://github.com/flittev) + +## __Amazon EC2 Container Service__ + - ### Features + - Documentation only update to address various tickets. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - API changes to AWS Verified Access to include data from trust providers in logs + +## __Amazon Route 53 Domains__ + - ### Features + - Update MaxItems upper bound to 1000 for ListPricesRequest + +## __Amazon SageMaker Service__ + - ### Features + - Amazon Sagemaker Autopilot releases CreateAutoMLJobV2 and DescribeAutoMLJobV2 for Autopilot customers with ImageClassification, TextClassification and Tabular problem type config support. 
+ +## __Contributors__ +Special thanks to the following contributors to this release: + +[@flittev](https://github.com/flittev) +# __2.20.87__ __2023-06-16__ +## __AWS Account__ + - ### Features + - Improve pagination support for ListRegions + +## __AWS Application Discovery Service__ + - ### Features + - Add Amazon EC2 instance recommendations export + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for AWS Identity and Access Management (IAM). + +## __Amazon Connect Service__ + - ### Features + - Updates the *InstanceStorageConfig APIs to support a new ResourceType: SCREEN_RECORDINGS to enable screen recording and specify the storage configurations for publishing the recordings. Also updates DescribeInstance and ListInstances APIs to include InstanceAccessUrl attribute in the API response. + +## __Amazon DynamoDB__ + - ### Bugfixes + - Created static method EnumAttributeConverter::createWithNameAsKeys which creates a converter based on the Enum::name method to identify enums, rather than Enum::toString. This is preferable because Enum::name is final and cannot be overwritten, as opposed to Enum::toString. EnumAttributeConverter::create is kept as it is, for backward compatibility. + - Contributed by: [@martinKindall](https://github.com/martinKindall) + +## __Amazon Simple Storage Service__ + - ### Features + - This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@martinKindall](https://github.com/martinKindall) +# __2.20.86__ __2023-06-15__ +## __AWS Audit Manager__ + - ### Features + - This release introduces 2 Audit Manager features: CSV exports and new manual evidence options. 
You can now export your evidence finder results in CSV format. In addition, you can now add manual evidence to a control by entering free-form text or uploading a file from your browser. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon DynamoDB Enhanced__ + - ### Bugfixes + - Thanks to this bugfix it'll be possible to create DynamoDB table containing + secondary indices when using no arguments `createTable` method from `DefaultDynamoDbTable` + class. Information about their presence might be expressed using annotations, but it was ignored + and created tables didn't contain specified indices. Please note that it is still not possible + to specify projections for indices using annotations. By default, all fields will be projected. + - Contributed by: [@breader124](https://github.com/breader124) + +## __Amazon Elastic File System__ + - ### Features + - Documentation updates for EFS. + +## __Amazon GuardDuty__ + - ### Features + - Updated descriptions for some APIs. + +## __Amazon Location Service__ + - ### Features + - Amazon Location Service adds categories to places, including filtering on those categories in searches. Also, you can now add metadata properties to your geofences. + +## __DynamoDB Enhanced Client__ + - ### Features + - Add EnhancedType parameters to static builder methods of StaticTableSchema and StaticImmutableTableSchema + - Contributed by: [@bmaizels](https://github.com/bmaizels) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) +# __2.20.85__ __2023-06-13__ +## __AWS CloudTrail__ + - ### Features + - This feature allows users to view dashboards for CloudTrail Lake event data stores. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __AWS SecurityHub__ + - ### Features + - Add support for Security Hub Automation Rules + +## __AWS SimSpace Weaver__ + - ### Features + - This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs. + +## __AWS WAFV2__ + - ### Features + - You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. + +## __AWS Well-Architected Tool__ + - ### Features + - AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes. + +## __Amazon CodeGuru Security__ + - ### Features + - Initial release of Amazon CodeGuru Security APIs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address. + +## __Amazon Lightsail__ + - ### Features + - This release adds pagination for the Get Certificates API operation. + +## __Amazon Simple Storage Service__ + - ### Features + - Integrate double encryption feature to SDKs. + +## __Amazon Verified Permissions__ + - ### Features + - GA release of Amazon Verified Permissions. + +## __EC2 Image Builder__ + - ### Features + - Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data. + +## __Elastic Disaster Recovery Service__ + - ### Features + - Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery. + +# __2.20.84__ __2023-06-12__ +## __AWS Amplify UI Builder__ + - ### Features + - AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Add "unsafe" and "fromRemaining" AsyncRequestBody constructors for byte arrays and ByteBuffers + - Contributed by: [@StephenFlavin](https://github.com/StephenFlavin) + - Updated endpoint and partition metadata. + - `IdleConnectionReaper` now does not prevent `HttpClientConnectionManager` from getting GC'd in the case where an SDK client is created per request and not closed. + +## __Amazon DynamoDB__ + - ### Features + - Documentation updates for DynamoDB + +## __Amazon DynamoDB Streams__ + - ### Features + - Documentation updates for DynamoDB Streams + +## __Amazon FSx__ + - ### Features + - Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created. + +## __Amazon OpenSearch Service__ + - ### Features + - This release adds support for SkipUnavailable connection property for cross cluster search + +## __Amazon Rekognition__ + - ### Features + - This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector. + +## __Amazon SageMaker Service__ + - ### Features + - Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. With these devices, you can run your workloads at highest performance with lowest cost. inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@StephenFlavin](https://github.com/StephenFlavin) +# __2.20.83__ __2023-06-09__ +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - Document-only update to refresh CLI documentation for AWS Private CA. No change to the service. 
+ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fixed issue with leased connection leaks when threads executing HTTP connections with Apache HttpClient were interrupted while the connection was in progress. + +## __Amazon Connect Service__ + - ### Features + - This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - By default, Netty threads are blocked during dns resolution, namely InetAddress.getByName is used under the hood. Now, there's an option to configure the NettyNioAsyncHttpClient in order to use a non blocking dns resolution strategy. + - Contributed by: [@martinKindall](https://github.com/martinKindall) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@martinKindall](https://github.com/martinKindall) # __2.20.82__ __2023-06-08__ ## __AWS Comprehend Medical__ - ### Features diff --git a/README.md b/README.md index 12e4aca1144e..9cbb8321a57c 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Gitter](https://badges.gitter.im/aws/aws-sdk-java-v2.svg)](https://gitter.im/aws/aws-sdk-java-v2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-88-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-89-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. 
As with version 1.0, @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.20.82 + 2.20.89 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.20.82 + 2.20.89 software.amazon.awssdk s3 - 2.20.82 + 2.20.89 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.20.82 + 2.20.89 ``` @@ -304,6 +304,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Andy Kiesler
Andy Kiesler

💻 Martin
Martin

💻 Paulo Lieuthier
Paulo Lieuthier

💻 + Sébastien Crocquesel
Sébastien Crocquesel

💻 + David Negrete
David Negrete

💻 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 2db3cdb39d94..84f8b3cf9a8a 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index dd246c68d38d..3c22bbc11bd1 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index 37ff384c05aa..c13c81572673 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index b447c87520bd..ab1bdf712874 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 9d86f8d7669c..d3f60597cc33 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../pom.xml aws-sdk-java @@ -1753,6 +1753,16 @@ Amazon AutoScaling, etc). 
paymentcryptography ${awsjavasdk.version} + + software.amazon.awssdk + codegurusecurity + ${awsjavasdk.version} + + + software.amazon.awssdk + verifiedpermissions + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 8891b21a2664..0143a3251f98 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 @@ -134,6 +134,16 @@ netty-buffer ${netty.version} + + io.netty + netty-resolver + ${netty.version} + + + io.netty + netty-resolver-dns + ${netty.version} + org.reactivestreams reactive-streams diff --git a/bom/pom.xml b/bom/pom.xml index 2b6f54c80a1e..d8839ed1e7a7 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../pom.xml bom @@ -1903,6 +1903,16 @@ paymentcryptography ${awsjavasdk.version} + + software.amazon.awssdk + codegurusecurity + ${awsjavasdk.version} + + + software.amazon.awssdk + verifiedpermissions + ${awsjavasdk.version} + diff --git a/bundle/pom.xml b/bundle/pom.xml index 5983781b1ebc..0f417bf01c53 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 91566cf86aee..5b5edbf3107d 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 715ac5a57988..a7aa8c4580a4 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git 
a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 20b30a6d6cba..0b46119e1d74 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index c5f7ed6e8736..5b30914bde9a 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java index f8731d7dad07..bd0e6023d53e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java @@ -18,8 +18,12 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; @@ -32,6 +36,9 @@ import software.amazon.awssdk.codegen.utils.AuthUtils; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.utils.CollectionUtils; public class AsyncClientBuilderClass implements 
ClassSpec { private final IntermediateModel model; @@ -119,26 +126,53 @@ private MethodSpec endpointProviderMethod() { } private MethodSpec buildClientMethod() { - return MethodSpec.methodBuilder("buildClient") - .addAnnotation(Override.class) - .addModifiers(Modifier.PROTECTED, Modifier.FINAL) - .returns(clientInterfaceName) - .addStatement("$T clientConfiguration = super.asyncClientConfiguration()", SdkClientConfiguration.class) - .addStatement("this.validateClientOptions(clientConfiguration)") - .addStatement("$T endpointOverride = null", URI.class) - .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" - + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" - + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" - + "}", - SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) - .addStatement("$T serviceClientConfiguration = $T.builder()" - + ".overrideConfiguration(overrideConfiguration())" - + ".region(clientConfiguration.option($T.AWS_REGION))" - + ".endpointOverride(endpointOverride)" - + ".build()", - serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) - .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) - .build(); + MethodSpec.Builder b = MethodSpec.methodBuilder("buildClient") + .addAnnotation(Override.class) + .addModifiers(Modifier.PROTECTED, Modifier.FINAL) + .returns(clientInterfaceName) + .addStatement("$T clientConfiguration = super.asyncClientConfiguration()", + SdkClientConfiguration.class); + + addQueryProtocolInterceptors(b); + + return b.addStatement("this.validateClientOptions(clientConfiguration)") + .addStatement("$T endpointOverride = null", URI.class) + .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" + + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" + + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" + + "}", + 
SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) + .addStatement("$T serviceClientConfiguration = $T.builder()" + + ".overrideConfiguration(overrideConfiguration())" + + ".region(clientConfiguration.option($T.AWS_REGION))" + + ".endpointOverride(endpointOverride)" + + ".build()", + serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) + .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) + .build(); + } + + private MethodSpec.Builder addQueryProtocolInterceptors(MethodSpec.Builder b) { + if (!model.getMetadata().isQueryProtocol()) { + return b; + } + + TypeName listType = ParameterizedTypeName.get(List.class, ExecutionInterceptor.class); + + b.addStatement("$T interceptors = clientConfiguration.option($T.EXECUTION_INTERCEPTORS)", + listType, SdkClientOption.class) + .addStatement("$T queryParamsToBodyInterceptor = $T.singletonList(new $T())", + listType, Collections.class, QueryParametersToBodyInterceptor.class) + .addStatement("$T customizationInterceptors = new $T<>()", listType, ArrayList.class); + + List customInterceptors = model.getCustomizationConfig().getInterceptors(); + customInterceptors.forEach(i -> b.addStatement("customizationInterceptors.add(new $T())", ClassName.bestGuess(i))); + + b.addStatement("interceptors = $T.mergeLists(queryParamsToBodyInterceptor, interceptors)", CollectionUtils.class) + .addStatement("interceptors = $T.mergeLists(customizationInterceptors, interceptors)", CollectionUtils.class); + + return b.addStatement("clientConfiguration = clientConfiguration.toBuilder().option($T.EXECUTION_INTERCEPTORS, " + + "interceptors).build()", SdkClientOption.class); } private MethodSpec bearerTokenProviderMethod() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index 1be4d730040e..72d534d5ab99 
100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -28,7 +28,6 @@ import com.squareup.javapoet.TypeSpec; import com.squareup.javapoet.TypeVariableName; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -59,7 +58,6 @@ import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkHttpConfigurationOption; -import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.awssdk.utils.StringUtils; @@ -262,8 +260,10 @@ private MethodSpec finalizeServiceConfigurationMethod() { builtInInterceptors.add(endpointRulesSpecUtils.authSchemesInterceptorName()); builtInInterceptors.add(endpointRulesSpecUtils.requestModifierInterceptorName()); - for (String interceptor : model.getCustomizationConfig().getInterceptors()) { - builtInInterceptors.add(ClassName.bestGuess(interceptor)); + if (!model.getMetadata().isQueryProtocol()) { + for (String interceptor : model.getCustomizationConfig().getInterceptors()) { + builtInInterceptors.add(ClassName.bestGuess(interceptor)); + } } for (ClassName interceptor : builtInInterceptors) { @@ -288,16 +288,6 @@ private MethodSpec finalizeServiceConfigurationMethod() { builder.addCode("interceptors = $T.mergeLists(interceptors, config.option($T.EXECUTION_INTERCEPTORS));\n", CollectionUtils.class, SdkClientOption.class); - if (model.getMetadata().isQueryProtocol()) { - TypeName listType = ParameterizedTypeName.get(List.class, ExecutionInterceptor.class); - builder.addStatement("$T protocolInterceptors = $T.singletonList(new $T())", - listType, - Collections.class, - 
QueryParametersToBodyInterceptor.class); - builder.addStatement("interceptors = $T.mergeLists(interceptors, protocolInterceptors)", - CollectionUtils.class); - } - if (model.getEndpointOperation().isPresent()) { builder.beginControlFlow("if (!endpointDiscoveryEnabled)") .addStatement("$1T chain = new $1T(config)", DefaultEndpointDiscoveryProviderChain.class) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java index 036589de04e8..8b330e76ce1b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java @@ -18,8 +18,12 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; @@ -32,6 +36,9 @@ import software.amazon.awssdk.codegen.utils.AuthUtils; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.utils.CollectionUtils; public class SyncClientBuilderClass implements ClassSpec { private final IntermediateModel model; @@ -119,26 +126,53 @@ private MethodSpec endpointProviderMethod() { private MethodSpec buildClientMethod() { - return MethodSpec.methodBuilder("buildClient") - 
.addAnnotation(Override.class) - .addModifiers(Modifier.PROTECTED, Modifier.FINAL) - .returns(clientInterfaceName) - .addStatement("$T clientConfiguration = super.syncClientConfiguration()", SdkClientConfiguration.class) - .addStatement("this.validateClientOptions(clientConfiguration)") - .addStatement("$T endpointOverride = null", URI.class) - .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" - + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" - + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" - + "}", - SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) - .addStatement("$T serviceClientConfiguration = $T.builder()" - + ".overrideConfiguration(overrideConfiguration())" - + ".region(clientConfiguration.option($T.AWS_REGION))" - + ".endpointOverride(endpointOverride)" - + ".build()", - serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) - .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) - .build(); + MethodSpec.Builder b = MethodSpec.methodBuilder("buildClient") + .addAnnotation(Override.class) + .addModifiers(Modifier.PROTECTED, Modifier.FINAL) + .returns(clientInterfaceName) + .addStatement("$T clientConfiguration = super.syncClientConfiguration()", + SdkClientConfiguration.class); + + addQueryProtocolInterceptors(b); + + return b.addStatement("this.validateClientOptions(clientConfiguration)") + .addStatement("$T endpointOverride = null", URI.class) + .addCode("if (clientConfiguration.option($T.ENDPOINT_OVERRIDDEN) != null" + + "&& $T.TRUE.equals(clientConfiguration.option($T.ENDPOINT_OVERRIDDEN))) {" + + "endpointOverride = clientConfiguration.option($T.ENDPOINT);" + + "}", + SdkClientOption.class, Boolean.class, SdkClientOption.class, SdkClientOption.class) + .addStatement("$T serviceClientConfiguration = $T.builder()" + + ".overrideConfiguration(overrideConfiguration())" + + 
".region(clientConfiguration.option($T.AWS_REGION))" + + ".endpointOverride(endpointOverride)" + + ".build()", + serviceConfigClassName, serviceConfigClassName, AwsClientOption.class) + .addStatement("return new $T(serviceClientConfiguration, clientConfiguration)", clientClassName) + .build(); + } + + private MethodSpec.Builder addQueryProtocolInterceptors(MethodSpec.Builder b) { + if (!model.getMetadata().isQueryProtocol()) { + return b; + } + + TypeName listType = ParameterizedTypeName.get(List.class, ExecutionInterceptor.class); + + b.addStatement("$T interceptors = clientConfiguration.option($T.EXECUTION_INTERCEPTORS)", + listType, SdkClientOption.class) + .addStatement("$T queryParamsToBodyInterceptor = $T.singletonList(new $T())", + listType, Collections.class, QueryParametersToBodyInterceptor.class) + .addStatement("$T customizationInterceptors = new $T<>()", listType, ArrayList.class); + + List customInterceptors = model.getCustomizationConfig().getInterceptors(); + customInterceptors.forEach(i -> b.addStatement("customizationInterceptors.add(new $T())", ClassName.bestGuess(i))); + + b.addStatement("interceptors = $T.mergeLists(queryParamsToBodyInterceptor, interceptors)", CollectionUtils.class) + .addStatement("interceptors = $T.mergeLists(customizationInterceptors, interceptors)", CollectionUtils.class); + + return b.addStatement("clientConfiguration = clientConfiguration.toBuilder().option($T.EXECUTION_INTERCEPTORS, " + + "interceptors).build()", SdkClientOption.class); } private MethodSpec tokenProviderMethodImpl() { diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource index 5d5d39a6c9d5..2018b804f3d7 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource @@ -187,6 +187,17 @@ 
}, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { } } ], "version" : "1.1" } \ No newline at end of file diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/internal/QueryProtocolCustomTestInterceptor.java b/codegen/src/test/java/software/amazon/awssdk/codegen/internal/QueryProtocolCustomTestInterceptor.java new file mode 100644 index 000000000000..6bd5206d9b11 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/internal/QueryProtocolCustomTestInterceptor.java @@ -0,0 +1,12 @@ +package software.amazon.awssdk.codegen.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.poet.builder.BuilderClassTest; + +/** + * Empty no-op test interceptor for query protocols to view generated code in test-query-sync-client-builder-class.java and + * test-query-async-client-builder-class.java and validate in {@link BuilderClassTest}. 
+ */ +@SdkInternalApi +public class QueryProtocolCustomTestInterceptor { +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java index b111e47bf3c0..3edafd55dab3 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BuilderClassTest.java @@ -58,6 +58,16 @@ public void baseQueryClientBuilderClass() throws Exception { validateQueryGeneration(BaseClientBuilderClass::new, "test-query-client-builder-class.java"); } + @Test + public void syncQueryClientBuilderClass() throws Exception { + validateQueryGeneration(SyncClientBuilderClass::new, "test-query-sync-client-builder-class.java"); + } + + @Test + public void asyncQueryClientBuilderClass() throws Exception { + validateQueryGeneration(AsyncClientBuilderClass::new, "test-query-async-client-builder-class.java"); + } + @Test public void syncClientBuilderInterface() throws Exception { validateGeneration(SyncClientBuilderInterface::new, "test-sync-client-builder-interface.java"); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-async-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-async-client-builder-class.java new file mode 100644 index 000000000000..f71429db299c --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-async-client-builder-class.java @@ -0,0 +1,61 @@ +package software.amazon.awssdk.services.query; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; +import 
software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.codegen.internal.QueryProtocolCustomTestInterceptor; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; +import software.amazon.awssdk.utils.CollectionUtils; + +/** + * Internal implementation of {@link QueryAsyncClientBuilder}. + */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +final class DefaultQueryAsyncClientBuilder extends DefaultQueryBaseClientBuilder + implements QueryAsyncClientBuilder { + @Override + public DefaultQueryAsyncClientBuilder endpointProvider(QueryEndpointProvider endpointProvider) { + clientConfiguration.option(SdkClientOption.ENDPOINT_PROVIDER, endpointProvider); + return this; + } + + @Override + public DefaultQueryAsyncClientBuilder tokenProvider(SdkTokenProvider tokenProvider) { + clientConfiguration.option(AwsClientOption.TOKEN_PROVIDER, tokenProvider); + return this; + } + + @Override + protected final QueryAsyncClient buildClient() { + SdkClientConfiguration clientConfiguration = super.asyncClientConfiguration(); + List interceptors = clientConfiguration.option(SdkClientOption.EXECUTION_INTERCEPTORS); + List queryParamsToBodyInterceptor = Collections + .singletonList(new QueryParametersToBodyInterceptor()); + List customizationInterceptors = new ArrayList<>(); + customizationInterceptors.add(new QueryProtocolCustomTestInterceptor()); + interceptors = CollectionUtils.mergeLists(queryParamsToBodyInterceptor, interceptors); + interceptors = CollectionUtils.mergeLists(customizationInterceptors, interceptors); + clientConfiguration = 
clientConfiguration.toBuilder().option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) + .build(); + this.validateClientOptions(clientConfiguration); + URI endpointOverride = null; + if (clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN) != null + && Boolean.TRUE.equals(clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN))) { + endpointOverride = clientConfiguration.option(SdkClientOption.ENDPOINT); + } + QueryServiceClientConfiguration serviceClientConfiguration = QueryServiceClientConfiguration.builder() + .overrideConfiguration(overrideConfiguration()).region(clientConfiguration.option(AwsClientOption.AWS_REGION)) + .endpointOverride(endpointOverride).build(); + return new DefaultQueryAsyncClient(serviceClientConfiguration, clientConfiguration); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java index 141b27f6cfe0..e1b5cf7bf055 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java @@ -1,7 +1,6 @@ package software.amazon.awssdk.services.query; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -17,7 +16,6 @@ import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.signer.Signer; -import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; import software.amazon.awssdk.services.query.endpoints.QueryClientContextParams; import 
software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; import software.amazon.awssdk.services.query.endpoints.internal.QueryEndpointAuthSchemeInterceptor; @@ -64,8 +62,6 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); - List protocolInterceptors = Collections.singletonList(new QueryParametersToBodyInterceptor()); - interceptors = CollectionUtils.mergeLists(interceptors, protocolInterceptors); return config.toBuilder().option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) .option(SdkClientOption.CLIENT_CONTEXT_PARAMS, clientContextParams.build()).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-sync-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-sync-client-builder-class.java new file mode 100644 index 000000000000..56b94d1d3189 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-sync-client-builder-class.java @@ -0,0 +1,61 @@ +package software.amazon.awssdk.services.query; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.codegen.internal.QueryProtocolCustomTestInterceptor; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import 
software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.protocols.query.interceptor.QueryParametersToBodyInterceptor; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; +import software.amazon.awssdk.utils.CollectionUtils; + +/** + * Internal implementation of {@link QueryClientBuilder}. + */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +final class DefaultQueryClientBuilder extends DefaultQueryBaseClientBuilder implements + QueryClientBuilder { + @Override + public DefaultQueryClientBuilder endpointProvider(QueryEndpointProvider endpointProvider) { + clientConfiguration.option(SdkClientOption.ENDPOINT_PROVIDER, endpointProvider); + return this; + } + + @Override + public DefaultQueryClientBuilder tokenProvider(SdkTokenProvider tokenProvider) { + clientConfiguration.option(AwsClientOption.TOKEN_PROVIDER, tokenProvider); + return this; + } + + @Override + protected final QueryClient buildClient() { + SdkClientConfiguration clientConfiguration = super.syncClientConfiguration(); + List interceptors = clientConfiguration.option(SdkClientOption.EXECUTION_INTERCEPTORS); + List queryParamsToBodyInterceptor = Collections + .singletonList(new QueryParametersToBodyInterceptor()); + List customizationInterceptors = new ArrayList<>(); + customizationInterceptors.add(new QueryProtocolCustomTestInterceptor()); + interceptors = CollectionUtils.mergeLists(queryParamsToBodyInterceptor, interceptors); + interceptors = CollectionUtils.mergeLists(customizationInterceptors, interceptors); + clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors) + .build(); + this.validateClientOptions(clientConfiguration); + URI endpointOverride = null; + if (clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN) != null + && Boolean.TRUE.equals(clientConfiguration.option(SdkClientOption.ENDPOINT_OVERRIDDEN))) { + endpointOverride = 
clientConfiguration.option(SdkClientOption.ENDPOINT); + } + QueryServiceClientConfiguration serviceClientConfiguration = QueryServiceClientConfiguration.builder() + .overrideConfiguration(overrideConfiguration()).region(clientConfiguration.option(AwsClientOption.AWS_REGION)) + .endpointOverride(endpointOverride).build(); + return new DefaultQueryClient(serviceClientConfiguration, clientConfiguration); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config index c95b6d2e5f63..18824fa00a30 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization.config @@ -2,7 +2,10 @@ "authPolicyActions" : { "skip" : true }, - "skipEndpointTests": { + "skipEndpointTests": { "test case 4": "Does not work" - } + }, + "interceptors": [ + "software.amazon.awssdk.codegen.internal.QueryProtocolCustomTestInterceptor" + ] } \ No newline at end of file diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index e7e56f9a5800..5d80d461b01d 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 6944979856de..087a629e616c 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 130d8a5a1c70..0627a09f5ee8 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 0f8b290f230c..d0bb2ca7fa60 100644 --- 
a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT auth diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index c12ff8475e9b..77bc1c6facd6 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT aws-core diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index dfe0b0732960..eb64fd171bfd 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index 01ef0d2848cb..8803b92b03d1 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/imds/pom.xml b/core/imds/pom.xml index ba1674650cff..80ba1375e35e 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index 141eafed0731..bf11f0a8564b 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 80d192bfeb42..33b9b1a5d67b 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index b7e7841f57ca..303cbced5f39 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index a8b8d09aad6a..849ff52ab5ed 100644 --- a/core/profiles/pom.xml +++ 
b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT profiles diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index a3266b7541be..15ae77258459 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 670e1fb18d2d..78690dd74564 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 1174ac010bff..4849b52929d1 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index 04a36b1e9a34..4d0c5cd31eb6 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 794661bd2148..7ecb321e5ee1 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index 916f1a3082da..8df7b3140d9e 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git 
a/core/regions/pom.xml b/core/regions/pom.xml index 38c892a4722c..508edfabcf7f 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 8a12637e5344..344555c7d052 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -10485,6 +10485,7 @@ "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11055,10 +11056,15 @@ "nimble" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -16334,6 +16340,37 @@ } } }, + "verifiedpermissions" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "voice-chime" : { "endpoints" : { "ap-northeast-1" : { }, @@ -23414,6 +23451,12 @@ "us-iso-west-1" : { } } }, + "dlm" : { + "endpoints" : { + 
"us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "dms" : { "defaults" : { "variants" : [ { @@ -24337,6 +24380,23 @@ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { }, "services" : { } + }, { + "defaults" : { + "hostname" : "{service}.{region}.{dnsSuffix}", + "protocols" : [ "https" ], + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "csp.hci.ic.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, + "dnsSuffix" : "csp.hci.ic.gov", + "partition" : "aws-iso-f", + "partitionName" : "AWS ISOF", + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { }, + "services" : { } } ], "version" : 3 } \ No newline at end of file diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 8bb378475af6..408447092258 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 7a1738f51d97..07dea1568089 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -22,37 +22,38 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.util.Arrays; import java.util.Optional; import java.util.concurrent.ExecutorService; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.core.internal.async.ByteArrayAsyncRequestBody; +import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import 
software.amazon.awssdk.core.internal.async.InputStreamWithExecutorAsyncRequestBody; import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.utils.BinaryUtils; /** - * Interface to allow non-blocking streaming of request content. This follows the reactive streams pattern where - * this interface is the {@link Publisher} of data (specifically {@link ByteBuffer} chunks) and the HTTP client is the Subscriber - * of the data (i.e. to write that data on the wire). + * Interface to allow non-blocking streaming of request content. This follows the reactive streams pattern where this interface is + * the {@link Publisher} of data (specifically {@link ByteBuffer} chunks) and the HTTP client is the Subscriber of the data (i.e. + * to write that data on the wire). * *

* {@link #subscribe(Subscriber)} should be implemented to tie this publisher to a subscriber. Ideally each call to subscribe - * should reproduce the content (i.e if you are reading from a file each subscribe call should produce a {@link - * org.reactivestreams.Subscription} that reads the file fully). This allows for automatic retries to be performed in the SDK. If - * the content is not reproducible, an exception may be thrown from any subsequent {@link #subscribe(Subscriber)} calls. + * should reproduce the content (i.e if you are reading from a file each subscribe call should produce a + * {@link org.reactivestreams.Subscription} that reads the file fully). This allows for automatic retries to be performed in the + * SDK. If the content is not reproducible, an exception may be thrown from any subsequent {@link #subscribe(Subscriber)} calls. *

* *

- * It is important to only send the number of chunks that the subscriber requests to avoid out of memory situations. - * The subscriber does it's own buffering so it's usually not needed to buffer in the publisher. Additional permits - * for chunks will be notified via the {@link org.reactivestreams.Subscription#request(long)} method. + * It is important to only send the number of chunks that the subscriber requests to avoid out of memory situations. The + * subscriber does it's own buffering so it's usually not needed to buffer in the publisher. Additional permits for chunks will be + * notified via the {@link org.reactivestreams.Subscription#request(long)} method. *

* * @see FileAsyncRequestBody - * @see ByteArrayAsyncRequestBody + * @see ByteBuffersAsyncRequestBody */ @SdkPublicApi public interface AsyncRequestBody extends SdkPublisher { @@ -70,8 +71,8 @@ default String contentType() { } /** - * Creates an {@link AsyncRequestBody} the produces data from the input ByteBuffer publisher. - * The data is delivered when the publisher publishes the data. + * Creates an {@link AsyncRequestBody} the produces data from the input ByteBuffer publisher. The data is delivered when the + * publisher publishes the data. * * @param publisher Publisher of source data * @return Implementation of {@link AsyncRequestBody} that produces data send by the publisher @@ -124,11 +125,11 @@ static AsyncRequestBody fromFile(File file) { * @param string The string to provide. * @param cs The {@link Charset} to use. * @return Implementation of {@link AsyncRequestBody} that uses the specified string. - * @see ByteArrayAsyncRequestBody + * @see ByteBuffersAsyncRequestBody */ static AsyncRequestBody fromString(String string, Charset cs) { - return new ByteArrayAsyncRequestBody(string.getBytes(cs), - Mimetype.MIMETYPE_TEXT_PLAIN + "; charset=" + cs.name()); + return ByteBuffersAsyncRequestBody.from(Mimetype.MIMETYPE_TEXT_PLAIN + "; charset=" + cs.name(), + string.getBytes(cs)); } /** @@ -143,29 +144,181 @@ static AsyncRequestBody fromString(String string) { } /** - * Creates a {@link AsyncRequestBody} from a byte array. The contents of the byte array are copied so modifications to the - * original byte array are not reflected in the {@link AsyncRequestBody}. + * Creates an {@link AsyncRequestBody} from a byte array. This will copy the contents of the byte array to prevent + * modifications to the provided byte array from being reflected in the {@link AsyncRequestBody}. * * @param bytes The bytes to send to the service. * @return AsyncRequestBody instance. 
*/ static AsyncRequestBody fromBytes(byte[] bytes) { - return new ByteArrayAsyncRequestBody(bytes, Mimetype.MIMETYPE_OCTET_STREAM); + byte[] clonedBytes = bytes.clone(); + return ByteBuffersAsyncRequestBody.from(clonedBytes); } /** - * Creates a {@link AsyncRequestBody} from a {@link ByteBuffer}. Buffer contents are copied so any modifications - * made to the original {@link ByteBuffer} are not reflected in the {@link AsyncRequestBody}. + * Creates an {@link AsyncRequestBody} from a byte array without copying the contents of the byte array. This + * introduces concurrency risks, allowing: (1) the caller to modify the byte array stored in this {@code AsyncRequestBody} + * implementation AND (2) any users of {@link #fromBytesUnsafe(byte[])} to modify the byte array passed into this + * {@code AsyncRequestBody} implementation. + * + *

As the method name implies, this is unsafe. Use {@link #fromBytes(byte[])} unless you're sure you know the risks. + * + * @param bytes The bytes to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromBytesUnsafe(byte[] bytes) { + return ByteBuffersAsyncRequestBody.from(bytes); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer}. This will copy the contents of the {@link ByteBuffer} to + * prevent modifications to the provided {@link ByteBuffer} from being reflected in the {@link AsyncRequestBody}. + *

+ * NOTE: This method ignores the current read position. Use {@link #fromRemainingByteBuffer(ByteBuffer)} if you need + * it to copy only the remaining readable bytes. * * @param byteBuffer ByteBuffer to send to the service. * @return AsyncRequestBody instance. */ static AsyncRequestBody fromByteBuffer(ByteBuffer byteBuffer) { - return fromBytes(BinaryUtils.copyAllBytesFrom(byteBuffer)); + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOf(byteBuffer); + immutableCopy.rewind(); + return ByteBuffersAsyncRequestBody.of((long) immutableCopy.remaining(), immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from the remaining readable bytes from a {@link ByteBuffer}. This will copy the + * remaining contents of the {@link ByteBuffer} to prevent modifications to the provided {@link ByteBuffer} from being + * reflected in the {@link AsyncRequestBody}. + *

Unlike {@link #fromByteBuffer(ByteBuffer)}, this method respects the current read position of the buffer and reads + * only the remaining bytes. + * + * @param byteBuffer ByteBuffer to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBuffer(ByteBuffer byteBuffer) { + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOfRemaining(byteBuffer); + return ByteBuffersAsyncRequestBody.of((long) immutableCopy.remaining(), immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} without copying the contents of the + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify the {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

+ * NOTE: This method ignores the current read position. Use {@link #fromRemainingByteBufferUnsafe(ByteBuffer)} if you + * need it to copy only the remaining readable bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffer(ByteBuffer)}} unless you're sure you know the + * risks. + * + * @param byteBuffer ByteBuffer to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromByteBufferUnsafe(ByteBuffer byteBuffer) { + ByteBuffer readOnlyBuffer = byteBuffer.asReadOnlyBuffer(); + readOnlyBuffer.rewind(); + return ByteBuffersAsyncRequestBody.of((long) readOnlyBuffer.remaining(), readOnlyBuffer); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} without copying the contents of the + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify the {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

Unlike {@link #fromByteBufferUnsafe(ByteBuffer)}, this method respects the current read position of + * the buffer and reads only the remaining bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffer(ByteBuffer)}} unless you're sure you know the + * risks. + * + * @param byteBuffer ByteBuffer to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBufferUnsafe(ByteBuffer byteBuffer) { + ByteBuffer readOnlyBuffer = byteBuffer.asReadOnlyBuffer(); + return ByteBuffersAsyncRequestBody.of((long) readOnlyBuffer.remaining(), readOnlyBuffer); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array. This will copy the contents of each {@link ByteBuffer} + * to prevent modifications to any provided {@link ByteBuffer} from being reflected in the {@link AsyncRequestBody}. + *

+ * NOTE: This method ignores the current read position of each {@link ByteBuffer}. Use + * {@link #fromRemainingByteBuffers(ByteBuffer...)} if you need it to copy only the remaining readable bytes. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromByteBuffers(ByteBuffer... byteBuffers) { + ByteBuffer[] immutableCopy = Arrays.stream(byteBuffers) + .map(BinaryUtils::immutableCopyOf) + .peek(ByteBuffer::rewind) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array. This will copy the remaining contents of each + * {@link ByteBuffer} to prevent modifications to any provided {@link ByteBuffer} from being reflected in the + * {@link AsyncRequestBody}. + *

Unlike {@link #fromByteBufferUnsafe(ByteBuffer)}, + * this method respects the current read position of each buffer and reads only the remaining bytes. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBuffers(ByteBuffer... byteBuffers) { + ByteBuffer[] immutableCopy = Arrays.stream(byteBuffers) + .map(BinaryUtils::immutableCopyOfRemaining) + .peek(ByteBuffer::rewind) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(immutableCopy); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array without copying the contents of each + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify any {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

+ * NOTE: This method ignores the current read position of each {@link ByteBuffer}. Use + * {@link #fromRemainingByteBuffers(ByteBuffer...)} if you need it to copy only the remaining readable bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffers(ByteBuffer...)} unless you're sure you know the + * risks. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromByteBuffersUnsafe(ByteBuffer... byteBuffers) { + ByteBuffer[] readOnlyBuffers = Arrays.stream(byteBuffers) + .map(ByteBuffer::asReadOnlyBuffer) + .peek(ByteBuffer::rewind) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(readOnlyBuffers); + } + + /** + * Creates an {@link AsyncRequestBody} from a {@link ByteBuffer} array without copying the contents of each + * {@link ByteBuffer}. This introduces concurrency risks, allowing the caller to modify any {@link ByteBuffer} stored in this + * {@code AsyncRequestBody} implementation. + *

Unlike {@link #fromByteBuffersUnsafe(ByteBuffer...)}, + * this method respects the current read position of each buffer and reads only the remaining bytes. + * + *

As the method name implies, this is unsafe. Use {@link #fromByteBuffers(ByteBuffer...)} unless you're sure you know the + * risks. + * + * @param byteBuffers ByteBuffer array to send to the service. + * @return AsyncRequestBody instance. + */ + static AsyncRequestBody fromRemainingByteBuffersUnsafe(ByteBuffer... byteBuffers) { + ByteBuffer[] readOnlyBuffers = Arrays.stream(byteBuffers) + .map(ByteBuffer::asReadOnlyBuffer) + .toArray(ByteBuffer[]::new); + return ByteBuffersAsyncRequestBody.of(readOnlyBuffers); } /** - * Creates a {@link AsyncRequestBody} from a {@link InputStream}. + * Creates an {@link AsyncRequestBody} from an {@link InputStream}. * *

An {@link ExecutorService} is required in order to perform the blocking data reads, to prevent blocking the * non-blocking event loop threads owned by the SDK. @@ -239,7 +392,7 @@ static BlockingOutputStreamAsyncRequestBody forBlockingOutputStream(Long content } /** - * Creates a {@link AsyncRequestBody} with no content. + * Creates an {@link AsyncRequestBody} with no content. * * @return AsyncRequestBody instance. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBody.java deleted file mode 100644 index 29205479b798..000000000000 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBody.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.internal.async; - -import java.nio.ByteBuffer; -import java.util.Optional; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.utils.Logger; - -/** - * An implementation of {@link AsyncRequestBody} for providing data from memory. 
This is created using static - * methods on {@link AsyncRequestBody} - * - * @see AsyncRequestBody#fromBytes(byte[]) - * @see AsyncRequestBody#fromByteBuffer(ByteBuffer) - * @see AsyncRequestBody#fromString(String) - */ -@SdkInternalApi -public final class ByteArrayAsyncRequestBody implements AsyncRequestBody { - private static final Logger log = Logger.loggerFor(ByteArrayAsyncRequestBody.class); - - private final byte[] bytes; - - private final String mimetype; - - public ByteArrayAsyncRequestBody(byte[] bytes, String mimetype) { - this.bytes = bytes.clone(); - this.mimetype = mimetype; - } - - @Override - public Optional contentLength() { - return Optional.of((long) bytes.length); - } - - @Override - public String contentType() { - return mimetype; - } - - @Override - public void subscribe(Subscriber s) { - // As per rule 1.9 we must throw NullPointerException if the subscriber parameter is null - if (s == null) { - throw new NullPointerException("Subscription MUST NOT be null."); - } - - // As per 2.13, this method must return normally (i.e. not throw). 
- try { - s.onSubscribe( - new Subscription() { - private boolean done = false; - - @Override - public void request(long n) { - if (done) { - return; - } - if (n > 0) { - done = true; - s.onNext(ByteBuffer.wrap(bytes)); - s.onComplete(); - } else { - s.onError(new IllegalArgumentException("§3.9: non-positive requests are not allowed!")); - } - } - - @Override - public void cancel() { - synchronized (this) { - if (!done) { - done = true; - } - } - } - } - ); - } catch (Throwable ex) { - log.error(() -> s + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", ex); - } - } -} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java new file mode 100644 index 000000000000..e7e9d00dd0e5 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java @@ -0,0 +1,157 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.util.Mimetype; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Logger; + +/** + * An implementation of {@link AsyncRequestBody} for providing data from the supplied {@link ByteBuffer} array. This is created + * using static methods on {@link AsyncRequestBody} + * + * @see AsyncRequestBody#fromBytes(byte[]) + * @see AsyncRequestBody#fromBytesUnsafe(byte[]) + * @see AsyncRequestBody#fromByteBuffer(ByteBuffer) + * @see AsyncRequestBody#fromByteBufferUnsafe(ByteBuffer) + * @see AsyncRequestBody#fromByteBuffers(ByteBuffer...) + * @see AsyncRequestBody#fromByteBuffersUnsafe(ByteBuffer...) + * @see AsyncRequestBody#fromString(String) + */ +@SdkInternalApi +public final class ByteBuffersAsyncRequestBody implements AsyncRequestBody { + private static final Logger log = Logger.loggerFor(ByteBuffersAsyncRequestBody.class); + + private final String mimetype; + private final Long length; + private final ByteBuffer[] buffers; + + private ByteBuffersAsyncRequestBody(String mimetype, Long length, ByteBuffer... 
buffers) { + this.mimetype = mimetype; + this.length = length; + this.buffers = buffers; + } + + @Override + public Optional contentLength() { + return Optional.ofNullable(length); + } + + @Override + public String contentType() { + return mimetype; + } + + @Override + public void subscribe(Subscriber s) { + // As per rule 1.9 we must throw NullPointerException if the subscriber parameter is null + if (s == null) { + throw new NullPointerException("Subscription MUST NOT be null."); + } + + // As per 2.13, this method must return normally (i.e. not throw). + try { + s.onSubscribe( + new Subscription() { + private final AtomicInteger index = new AtomicInteger(0); + private final AtomicBoolean completed = new AtomicBoolean(false); + + @Override + public void request(long n) { + if (completed.get()) { + return; + } + + if (n > 0) { + int i = index.getAndIncrement(); + + if (i >= buffers.length) { + return; + } + + long remaining = n; + + do { + ByteBuffer buffer = buffers[i]; + + // Pending discussions on https://github.com/aws/aws-sdk-java-v2/issues/3928 + if (buffer.isDirect()) { + buffer = BinaryUtils.toNonDirectBuffer(buffer); + } + + s.onNext(buffer.asReadOnlyBuffer()); + remaining--; + } while (remaining > 0 && (i = index.getAndIncrement()) < buffers.length); + + if (i >= buffers.length - 1 && completed.compareAndSet(false, true)) { + s.onComplete(); + } + } else { + s.onError(new IllegalArgumentException("§3.9: non-positive requests are not allowed!")); + } + } + + @Override + public void cancel() { + completed.set(true); + } + } + ); + } catch (Throwable ex) { + log.error(() -> s + " violated the Reactive Streams rule 2.13 by throwing an exception from onSubscribe.", ex); + } + } + + public static ByteBuffersAsyncRequestBody of(ByteBuffer... 
buffers) { + long length = Arrays.stream(buffers) + .mapToLong(ByteBuffer::remaining) + .sum(); + return new ByteBuffersAsyncRequestBody(Mimetype.MIMETYPE_OCTET_STREAM, length, buffers); + } + + public static ByteBuffersAsyncRequestBody of(Long length, ByteBuffer... buffers) { + return new ByteBuffersAsyncRequestBody(Mimetype.MIMETYPE_OCTET_STREAM, length, buffers); + } + + public static ByteBuffersAsyncRequestBody of(String mimetype, ByteBuffer... buffers) { + long length = Arrays.stream(buffers) + .mapToLong(ByteBuffer::remaining) + .sum(); + return new ByteBuffersAsyncRequestBody(mimetype, length, buffers); + } + + public static ByteBuffersAsyncRequestBody of(String mimetype, Long length, ByteBuffer... buffers) { + return new ByteBuffersAsyncRequestBody(mimetype, length, buffers); + } + + public static ByteBuffersAsyncRequestBody from(byte[] bytes) { + return new ByteBuffersAsyncRequestBody(Mimetype.MIMETYPE_OCTET_STREAM, (long) bytes.length, + ByteBuffer.wrap(bytes)); + } + + public static ByteBuffersAsyncRequestBody from(String mimetype, byte[] bytes) { + return new ByteBuffersAsyncRequestBody(mimetype, (long) bytes.length, ByteBuffer.wrap(bytes)); + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index 8fd7f0260b76..93d6d09578a6 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; @@ -58,10 +59,11 @@ public synchronized Iterable bufferAndCreateChunks(ByteBuffer buffer int 
availableToRead = bufferSize - bufferedBytes; int bytesToMove = Math.min(availableToRead, currentBytesRead - startPosition); + byte[] bytes = BinaryUtils.copyAllBytesFrom(buffer); if (bufferedBytes == 0) { - currentBuffer.put(buffer.array(), startPosition, bytesToMove); + currentBuffer.put(bytes, startPosition, bytesToMove); } else { - currentBuffer.put(buffer.array(), 0, bytesToMove); + currentBuffer.put(bytes, 0, bytesToMove); } startPosition = startPosition + bytesToMove; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java index 7521219a5030..a7cada02b06c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AfterTransmissionExecutionInterceptorsStage.java @@ -31,7 +31,7 @@ public class AfterTransmissionExecutionInterceptorsStage @Override public Pair execute(Pair input, RequestExecutionContext context) throws Exception { - InterruptMonitor.checkInterrupted(); + InterruptMonitor.checkInterrupted(input.right()); // Update interceptor context InterceptorContext interceptorContext = context.executionContext().interceptorContext().copy(b -> b.httpResponse(input.right()) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java index 0ddf70959cae..f3c92a254bec 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumInHeaderInterceptor.java @@ -15,7 +15,6 @@ package software.amazon.awssdk.core.internal.interceptor; -import static software.amazon.awssdk.core.HttpChecksumConstant.HTTP_CHECKSUM_VALUE; import static software.amazon.awssdk.core.HttpChecksumConstant.SIGNING_METHOD; import static software.amazon.awssdk.core.internal.util.HttpChecksumResolver.getResolvedChecksumSpecs; @@ -23,7 +22,6 @@ import java.io.UncheckedIOException; import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.ChecksumSpecs; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; @@ -47,49 +45,27 @@ @SdkInternalApi public class HttpChecksumInHeaderInterceptor implements ExecutionInterceptor { - @Override - public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) { - ChecksumSpecs headerChecksumSpecs = HttpChecksumUtils.checksumSpecWithRequestAlgorithm(executionAttributes).orElse(null); - - if (shouldSkipHttpChecksumInHeader(context, executionAttributes, headerChecksumSpecs)) { - return; - } - Optional syncContent = context.requestBody(); - syncContent.ifPresent( - requestBody -> saveContentChecksum(requestBody, executionAttributes, headerChecksumSpecs.algorithm())); - } - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - ChecksumSpecs checksumSpecs = getResolvedChecksumSpecs(executionAttributes); - - if (shouldSkipHttpChecksumInHeader(context, executionAttributes, checksumSpecs)) { - return context.httpRequest(); - } - - String httpChecksumValue = executionAttributes.getAttribute(HTTP_CHECKSUM_VALUE); - if (httpChecksumValue != null) { - return context.httpRequest().copy(r -> 
r.putHeader(checksumSpecs.headerName(), httpChecksumValue)); - } - return context.httpRequest(); - - } - /** - * Calculates the checksumSpecs of the provided request (and base64 encodes it), storing the result in - * executionAttribute "HttpChecksumValue". + * Calculates the checksum of the provided request (and base64 encodes it), and adds the header to the request. * *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do * that yet. */ - private static void saveContentChecksum(RequestBody requestBody, ExecutionAttributes executionAttributes, - Algorithm algorithm) { + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + ChecksumSpecs checksumSpecs = getResolvedChecksumSpecs(executionAttributes); + Optional syncContent = context.requestBody(); + + if (shouldSkipHttpChecksumInHeader(context, executionAttributes, checksumSpecs) || !syncContent.isPresent()) { + return context.httpRequest(); + } + try { String payloadChecksum = BinaryUtils.toBase64(HttpChecksumUtils.computeChecksum( - requestBody.contentStreamProvider().newStream(), algorithm)); - executionAttributes.putAttribute(HTTP_CHECKSUM_VALUE, payloadChecksum); + syncContent.get().contentStreamProvider().newStream(), checksumSpecs.algorithm())); + return context.httpRequest().copy(r -> r.putHeader(checksumSpecs.headerName(), payloadChecksum)); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java index c98cde397f0c..9729cd2076d7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java @@ -21,7 +21,6 @@ import 
software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; @@ -41,10 +40,17 @@ */ @SdkInternalApi public class HttpChecksumRequiredInterceptor implements ExecutionInterceptor { - private static final ExecutionAttribute CONTENT_MD5_VALUE = new ExecutionAttribute<>("ContentMd5"); + /** + * Calculates the MD5 checksum of the provided request (and base64 encodes it), and adds the header to the request. + * + *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with + * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the + * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do + * that yet. + */ @Override - public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) { + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { boolean isHttpChecksumRequired = isHttpChecksumRequired(executionAttributes); boolean requestAlreadyHasMd5 = context.httpRequest().firstMatchingHeader(Header.CONTENT_MD5).isPresent(); @@ -52,7 +58,7 @@ public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttribut Optional asyncContent = context.asyncRequestBody(); if (!isHttpChecksumRequired || requestAlreadyHasMd5) { - return; + return context.httpRequest(); } if (asyncContent.isPresent()) { @@ -60,14 +66,13 @@ public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttribut + "for non-blocking content."); } - syncContent.ifPresent(requestBody -> saveContentMd5(requestBody, executionAttributes)); - } - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - String contentMd5 = executionAttributes.getAttribute(CONTENT_MD5_VALUE); - if (contentMd5 != null) { - return context.httpRequest().copy(r -> r.putHeader(Header.CONTENT_MD5, contentMd5)); + if (syncContent.isPresent()) { + try { + String payloadMd5 = Md5Utils.md5AsBase64(syncContent.get().contentStreamProvider().newStream()); + return context.httpRequest().copy(r -> r.putHeader(Header.CONTENT_MD5, payloadMd5)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } return context.httpRequest(); } @@ -76,22 +81,4 @@ 
private boolean isHttpChecksumRequired(ExecutionAttributes executionAttributes) return executionAttributes.getAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED) != null || HttpChecksumUtils.isMd5ChecksumRequired(executionAttributes); } - - /** - * Calculates the MD5 checksum of the provided request (and base64 encodes it), storing the result in - * {@link #CONTENT_MD5_VALUE}. - * - *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with - * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the - * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do - * that yet. - */ - private void saveContentMd5(RequestBody requestBody, ExecutionAttributes executionAttributes) { - try { - String payloadMd5 = Md5Utils.md5AsBase64(requestBody.contentStreamProvider().newStream()); - executionAttributes.putAttribute(CONTENT_MD5_VALUE, payloadMd5); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java index 3b5b50f7e9a0..2894b2bd8dc4 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ClassLoaderHelper.java @@ -69,8 +69,7 @@ private static Class loadClassViaContext(String fqcn) { * @throws ClassNotFoundException * if failed to load the class */ - public static Class loadClass(String fqcn, Class... classes) - throws ClassNotFoundException { + public static Class loadClass(String fqcn, Class... 
classes) throws ClassNotFoundException { return loadClass(fqcn, true, classes); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java index 8610c32e49f3..5377e0f04e59 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutor.java @@ -16,14 +16,12 @@ package software.amazon.awssdk.core.internal.waiters; import java.util.List; -import java.util.Optional; import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.waiters.WaiterAcceptor; import software.amazon.awssdk.core.waiters.WaiterResponse; -import software.amazon.awssdk.core.waiters.WaiterState; import software.amazon.awssdk.utils.Either; import software.amazon.awssdk.utils.Validate; @@ -45,45 +43,42 @@ public WaiterExecutor(WaiterConfiguration configuration, } WaiterResponse execute(Supplier pollingFunction) { - return doExecute(pollingFunction, 0, System.currentTimeMillis()); - } - - WaiterResponse doExecute(Supplier pollingFunction, int attemptNumber, long startTime) { - attemptNumber++; - T response; - try { - response = pollingFunction.get(); - } catch (Exception exception) { - return evaluate(pollingFunction, Either.right(exception), attemptNumber, startTime); - } - - return evaluate(pollingFunction, Either.left(response), attemptNumber, startTime); - } + int attemptNumber = 0; + long startTime = System.currentTimeMillis(); - private WaiterResponse evaluate(Supplier pollingFunction, - Either responseOrException, - int attemptNumber, - long startTime) { - Optional> waiterAcceptor = 
executorHelper.firstWaiterAcceptorIfMatched(responseOrException); + while (true) { + attemptNumber++; - if (waiterAcceptor.isPresent()) { - WaiterState state = waiterAcceptor.get().waiterState(); - switch (state) { + Either polledResponse = pollResponse(pollingFunction); + WaiterAcceptor waiterAcceptor = firstWaiterAcceptor(polledResponse); + switch (waiterAcceptor.waiterState()) { case SUCCESS: - return executorHelper.createWaiterResponse(responseOrException, attemptNumber); + return executorHelper.createWaiterResponse(polledResponse, attemptNumber); case RETRY: - return maybeRetry(pollingFunction, attemptNumber, startTime); + waitToRetry(attemptNumber, startTime); + break; case FAILURE: - throw executorHelper.waiterFailureException(waiterAcceptor.get()); + throw executorHelper.waiterFailureException(waiterAcceptor); default: throw new UnsupportedOperationException(); } } + } + + private Either pollResponse(Supplier pollingFunction) { + try { + return Either.left(pollingFunction.get()); + } catch (Exception exception) { + return Either.right(exception); + } + } - throw executorHelper.noneMatchException(responseOrException); + private WaiterAcceptor firstWaiterAcceptor(Either responseOrException) { + return executorHelper.firstWaiterAcceptorIfMatched(responseOrException) + .orElseThrow(() -> executorHelper.noneMatchException(responseOrException)); } - private WaiterResponse maybeRetry(Supplier pollingFunction, int attemptNumber, long startTime) { + private void waitToRetry(int attemptNumber, long startTime) { Either nextDelayOrUnretryableException = executorHelper.nextDelayOrUnretryableException(attemptNumber, startTime); @@ -97,6 +92,5 @@ private WaiterResponse maybeRetry(Supplier pollingFunction, int attemptNum Thread.currentThread().interrupt(); throw SdkClientException.create("The thread got interrupted", e); } - return doExecute(pollingFunction, attemptNumber, startTime); } } diff --git 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java index e0252c9ba6d2..aab643cbb6a6 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java @@ -15,44 +15,39 @@ package software.amazon.awssdk.core.async; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import io.reactivex.Flowable; -import java.io.File; -import java.io.FileWriter; import java.io.IOException; -import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.FileSystem; import java.nio.file.Files; import java.nio.file.Path; -import java.time.Instant; -import java.util.Collections; import java.util.List; -import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.stream.Collectors; import org.assertj.core.util.Lists; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.core.internal.util.Mimetype; import 
software.amazon.awssdk.http.async.SimpleSubscriber; import software.amazon.awssdk.utils.BinaryUtils; -import software.amazon.awssdk.utils.StringInputStream; -@RunWith(Parameterized.class) public class AsyncRequestBodyTest { - private final static String testString = "Hello!"; - private final static Path path; + + private static final String testString = "Hello!"; + private static final Path path; static { FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); @@ -64,27 +59,16 @@ public class AsyncRequestBodyTest { } } - @Parameterized.Parameters - public static AsyncRequestBody[] data() { - return new AsyncRequestBody[]{ - AsyncRequestBody.fromString(testString), - AsyncRequestBody.fromFile(path) - }; + @ParameterizedTest + @MethodSource("contentIntegrityChecks") + void hasCorrectLength(AsyncRequestBody asyncRequestBody) { + assertEquals(testString.length(), asyncRequestBody.contentLength().get()); } - private AsyncRequestBody provider; - - public AsyncRequestBodyTest(AsyncRequestBody provider) { - this.provider = provider; - } - @Test - public void hasCorrectLength() { - assertThat(provider.contentLength().get()).isEqualTo(testString.length()); - } - - @Test - public void hasCorrectContent() throws InterruptedException { + @ParameterizedTest + @MethodSource("contentIntegrityChecks") + void hasCorrectContent(AsyncRequestBody asyncRequestBody) throws InterruptedException { StringBuilder sb = new StringBuilder(); CountDownLatch done = new CountDownLatch(1); @@ -106,75 +90,268 @@ public void onComplete() { } }; - provider.subscribe(subscriber); + asyncRequestBody.subscribe(subscriber); done.await(); - assertThat(sb.toString()).isEqualTo(testString); + assertEquals(testString, sb.toString()); + } + + private static AsyncRequestBody[] contentIntegrityChecks() { + return new AsyncRequestBody[] { + AsyncRequestBody.fromString(testString), + AsyncRequestBody.fromFile(path) + }; } @Test - public void stringConstructorHasCorrectContentType() { - AsyncRequestBody 
requestBody = AsyncRequestBody.fromString("hello world"); - assertThat(requestBody.contentType()).isEqualTo("text/plain; charset=UTF-8"); + void fromBytesCopiesTheProvidedByteArray() { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + byte[] bytesClone = bytes.clone(); + + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromBytes(bytes); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytesClone, publishedByteArray); } @Test - public void stringWithEncoding1ConstructorHasCorrectContentType() { - AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world", StandardCharsets.ISO_8859_1); - assertThat(requestBody.contentType()).isEqualTo("text/plain; charset=ISO-8859-1"); + void fromBytesUnsafeDoesNotCopyTheProvidedByteArray() { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromBytesUnsafe(bytes); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytes, publishedByteArray); + } + + @ParameterizedTest + @MethodSource("safeByteBufferBodyBuilders") + void safeByteBufferBuildersCopyTheProvidedBuffer(Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + byte[] bytesClone = bytes.clone(); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(ByteBuffer.wrap(bytes)); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + 
AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytesClone, publishedByteArray); + } + + private static Function[] safeByteBufferBodyBuilders() { + Function fromByteBuffer = AsyncRequestBody::fromByteBuffer; + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBuffer; + Function fromByteBuffers = AsyncRequestBody::fromByteBuffers; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffers; + return new Function[] {fromByteBuffer, fromRemainingByteBuffer, fromByteBuffers, fromRemainingByteBuffers}; + } + + @ParameterizedTest + @MethodSource("unsafeByteBufferBodyBuilders") + void unsafeByteBufferBuildersDoNotCopyTheProvidedBuffer(Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(ByteBuffer.wrap(bytes)); + + for (int i = 0; i < bytes.length; i++) { + bytes[i] += 1; + } + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + byte[] publishedByteArray = BinaryUtils.copyAllBytesFrom(publishedBuffer.get()); + assertArrayEquals(bytes, publishedByteArray); + } + + private static Function[] unsafeByteBufferBodyBuilders() { + Function fromByteBuffer = AsyncRequestBody::fromByteBufferUnsafe; + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBufferUnsafe; + Function fromByteBuffers = AsyncRequestBody::fromByteBuffersUnsafe; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffersUnsafe; + return new Function[] {fromByteBuffer, fromRemainingByteBuffer, fromByteBuffers, fromRemainingByteBuffers}; + } + + @ParameterizedTest + 
@MethodSource("nonRewindingByteBufferBodyBuilders") + void nonRewindingByteBufferBuildersReadFromTheInputBufferPosition( + Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + int expectedPosition = bytes.length / 2; + bb.position(expectedPosition); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + int remaining = bb.remaining(); + assertEquals(remaining, publishedBuffer.get().remaining()); + for (int i = 0; i < remaining; i++) { + assertEquals(bb.get(), publishedBuffer.get().get()); + } + } + + private static Function[] nonRewindingByteBufferBodyBuilders() { + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBuffer; + Function fromRemainingByteBufferUnsafe = AsyncRequestBody::fromRemainingByteBufferUnsafe; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffers; + Function fromRemainingByteBuffersUnsafe = AsyncRequestBody::fromRemainingByteBuffersUnsafe; + return new Function[] {fromRemainingByteBuffer, fromRemainingByteBufferUnsafe, fromRemainingByteBuffers, + fromRemainingByteBuffersUnsafe}; + } + + @ParameterizedTest + @MethodSource("safeNonRewindingByteBufferBodyBuilders") + void safeNonRewindingByteBufferBuildersCopyFromTheInputBufferPosition( + Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + int expectedPosition = bytes.length / 2; + bb.position(expectedPosition); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + int remaining = bb.remaining(); + assertEquals(remaining, 
publishedBuffer.get().capacity()); + for (int i = 0; i < remaining; i++) { + assertEquals(bb.get(), publishedBuffer.get().get()); + } + } + + private static Function[] safeNonRewindingByteBufferBodyBuilders() { + Function fromRemainingByteBuffer = AsyncRequestBody::fromRemainingByteBuffer; + Function fromRemainingByteBuffers = AsyncRequestBody::fromRemainingByteBuffers; + return new Function[] {fromRemainingByteBuffer, fromRemainingByteBuffers}; + } + + @ParameterizedTest + @MethodSource("rewindingByteBufferBodyBuilders") + void rewindingByteBufferBuildersDoNotRewindTheInputBuffer(Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + int expectedPosition = bytes.length / 2; + bb.position(expectedPosition); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + }); + + asyncRequestBody.subscribe(subscriber); + + assertEquals(expectedPosition, bb.position()); + } + + @ParameterizedTest + @MethodSource("rewindingByteBufferBodyBuilders") + void rewindingByteBufferBuildersReadTheInputBufferFromTheBeginning( + Function bodyBuilder) { + byte[] bytes = testString.getBytes(StandardCharsets.UTF_8); + ByteBuffer bb = ByteBuffer.wrap(bytes); + bb.position(bytes.length / 2); + + AsyncRequestBody asyncRequestBody = bodyBuilder.apply(bb); + + AtomicReference publishedBuffer = new AtomicReference<>(); + Subscriber subscriber = new SimpleSubscriber(publishedBuffer::set); + + asyncRequestBody.subscribe(subscriber); + + assertEquals(0, publishedBuffer.get().position()); + publishedBuffer.get().rewind(); + bb.rewind(); + assertEquals(bb, publishedBuffer.get()); + } + + private static Function[] rewindingByteBufferBodyBuilders() { + Function fromByteBuffer = AsyncRequestBody::fromByteBuffer; + Function fromByteBufferUnsafe = AsyncRequestBody::fromByteBufferUnsafe; + Function fromByteBuffers = AsyncRequestBody::fromByteBuffers; + Function 
fromByteBuffersUnsafe = AsyncRequestBody::fromByteBuffersUnsafe; + return new Function[] {fromByteBuffer, fromByteBufferUnsafe, fromByteBuffers, fromByteBuffersUnsafe}; + } + + @ParameterizedTest + @ValueSource(strings = {"US-ASCII", "ISO-8859-1", "UTF-8", "UTF-16BE", "UTF-16LE", "UTF-16"}) + void charsetsAreConvertedToTheCorrectContentType(Charset charset) { + AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world", charset); + assertEquals("text/plain; charset=" + charset.name(), requestBody.contentType()); } @Test - public void stringWithEncoding2ConstructorHasCorrectContentType() { - AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world", StandardCharsets.UTF_16BE); - assertThat(requestBody.contentType()).isEqualTo("text/plain; charset=UTF-16BE"); + void stringConstructorHasCorrectDefaultContentType() { + AsyncRequestBody requestBody = AsyncRequestBody.fromString("hello world"); + assertEquals("text/plain; charset=UTF-8", requestBody.contentType()); } @Test - public void fileConstructorHasCorrectContentType() { + void fileConstructorHasCorrectContentType() { AsyncRequestBody requestBody = AsyncRequestBody.fromFile(path); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void bytesArrayConstructorHasCorrectContentType() { + void bytesArrayConstructorHasCorrectContentType() { AsyncRequestBody requestBody = AsyncRequestBody.fromBytes("hello world".getBytes()); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void bytesBufferConstructorHasCorrectContentType() { + void bytesBufferConstructorHasCorrectContentType() { ByteBuffer byteBuffer = ByteBuffer.wrap("hello world".getBytes()); AsyncRequestBody requestBody = AsyncRequestBody.fromByteBuffer(byteBuffer); - 
assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void emptyBytesConstructorHasCorrectContentType() { + void emptyBytesConstructorHasCorrectContentType() { AsyncRequestBody requestBody = AsyncRequestBody.empty(); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } @Test - public void publisherConstructorHasCorrectContentType() { + void publisherConstructorHasCorrectContentType() { List requestBodyStrings = Lists.newArrayList("A", "B", "C"); List bodyBytes = requestBodyStrings.stream() - .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) - .collect(Collectors.toList()); + .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + .collect(Collectors.toList()); Publisher bodyPublisher = Flowable.fromIterable(bodyBytes); AsyncRequestBody requestBody = AsyncRequestBody.fromPublisher(bodyPublisher); - assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); - } - - @Test - public void fromBytes_byteArrayNotNull_createsCopy() { - byte[] original = {0x1, 0x2, 0x3, 0x4}; - byte[] toModify = new byte[original.length]; - System.arraycopy(original, 0, toModify, 0, original.length); - AsyncRequestBody body = AsyncRequestBody.fromBytes(toModify); - for (int i = 0; i < toModify.length; ++i) { - toModify[i]++; - } - ByteBuffer publishedBb = Flowable.fromPublisher(body).toList().blockingGet().get(0); - assertThat(BinaryUtils.copyAllBytesFrom(publishedBb)).isEqualTo(original); + assertEquals(Mimetype.MIMETYPE_OCTET_STREAM, requestBody.contentType()); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBodyTest.java deleted file mode 100644 index 
378fbf2f59c3..000000000000 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncRequestBodyTest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.internal.async; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicBoolean; -import org.junit.jupiter.api.Test; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; -import software.amazon.awssdk.core.internal.util.Mimetype; - -public class ByteArrayAsyncRequestBodyTest { - private class testSubscriber implements Subscriber { - private Subscription subscription; - protected AtomicBoolean onCompleteCalled = new AtomicBoolean(false); - - @Override - public void onSubscribe(Subscription s) { - this.subscription = s; - s.request(1); - } - - @Override - public void onNext(ByteBuffer byteBuffer) { - - } - - @Override - public void onError(Throwable throwable) { - - } - - @Override - public void onComplete() { - subscription.request(1); - onCompleteCalled.set(true); - } - } - - testSubscriber subscriber = new testSubscriber(); - - @Test - public void concurrentRequests_shouldCompleteNormally() { - ByteArrayAsyncRequestBody byteArrayReq = new ByteArrayAsyncRequestBody("Hello World!".getBytes(), - Mimetype.MIMETYPE_OCTET_STREAM); - byteArrayReq.subscribe(subscriber); - 
assertTrue(subscriber.onCompleteCalled.get()); - } - -} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBodyTest.java new file mode 100644 index 000000000000..b4073247f8b9 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBodyTest.java @@ -0,0 +1,227 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.utils.BinaryUtils; + +class ByteBuffersAsyncRequestBodyTest { + + private static class TestSubscriber implements Subscriber { + private Subscription subscription; + private boolean onCompleteCalled = false; + private int callsToComplete = 0; + private final List publishedResults = Collections.synchronizedList(new ArrayList<>()); + + public void request(long n) { + subscription.request(n); + } + + @Override + public void onSubscribe(Subscription s) { + this.subscription = s; + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + publishedResults.add(byteBuffer); + } + + @Override + public void onError(Throwable throwable) { + throw new IllegalStateException(throwable); + } + + @Override + public void onComplete() { + onCompleteCalled = true; + callsToComplete++; + } + } + + @Test + public void subscriberIsMarkedAsCompleted() { + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.from("Hello World!".getBytes(StandardCharsets.UTF_8)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + + assertTrue(subscriber.onCompleteCalled); + assertEquals(1, 
subscriber.publishedResults.size()); + } + + @Test + public void subscriberIsMarkedAsCompletedWhenARequestIsMadeForMoreBuffersThanAreAvailable() { + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.from("Hello World!".getBytes(StandardCharsets.UTF_8)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(2); + + assertTrue(subscriber.onCompleteCalled); + assertEquals(1, subscriber.publishedResults.size()); + } + + @Test + public void subscriberIsThreadSafeAndMarkedAsCompletedExactlyOnce() throws InterruptedException { + int numBuffers = 100; + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(IntStream.range(0, numBuffers) + .mapToObj(i -> ByteBuffer.wrap(new byte[1])) + .toArray(ByteBuffer[]::new)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + + int parallelism = 8; + ExecutorService executorService = Executors.newFixedThreadPool(parallelism); + for (int i = 0; i < parallelism; i++) { + executorService.submit(() -> { + for (int j = 0; j < numBuffers; j++) { + subscriber.request(2); + } + }); + } + executorService.shutdown(); + executorService.awaitTermination(1, TimeUnit.MINUTES); + + assertTrue(subscriber.onCompleteCalled); + assertEquals(1, subscriber.callsToComplete); + assertEquals(numBuffers, subscriber.publishedResults.size()); + } + + @Test + public void subscriberIsNotMarkedAsCompletedWhenThereAreRemainingBuffersToPublish() { + byte[] helloWorld = "Hello World!".getBytes(StandardCharsets.UTF_8); + byte[] goodbyeWorld = "Goodbye World!".getBytes(StandardCharsets.UTF_8); + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of((long) (helloWorld.length + goodbyeWorld.length), + ByteBuffer.wrap(helloWorld), + ByteBuffer.wrap(goodbyeWorld)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + + assertFalse(subscriber.onCompleteCalled); + assertEquals(1, 
subscriber.publishedResults.size()); + } + + @Test + public void subscriberReceivesAllBuffers() { + byte[] helloWorld = "Hello World!".getBytes(StandardCharsets.UTF_8); + byte[] goodbyeWorld = "Goodbye World!".getBytes(StandardCharsets.UTF_8); + + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of((long) (helloWorld.length + goodbyeWorld.length), + ByteBuffer.wrap(helloWorld), + ByteBuffer.wrap(goodbyeWorld)); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(2); + + assertEquals(2, subscriber.publishedResults.size()); + assertTrue(subscriber.onCompleteCalled); + assertArrayEquals(helloWorld, BinaryUtils.copyAllBytesFrom(subscriber.publishedResults.get(0))); + assertArrayEquals(goodbyeWorld, BinaryUtils.copyAllBytesFrom(subscriber.publishedResults.get(1))); + } + + @Test + public void multipleSubscribersReceiveTheSameResults() { + ByteBuffer sourceBuffer = ByteBuffer.wrap("Hello World!".getBytes(StandardCharsets.UTF_8)); + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(sourceBuffer); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + TestSubscriber otherSubscriber = new TestSubscriber(); + requestBody.subscribe(otherSubscriber); + otherSubscriber.request(1); + + ByteBuffer publishedBuffer = subscriber.publishedResults.get(0); + ByteBuffer otherPublishedBuffer = otherSubscriber.publishedResults.get(0); + + assertEquals(publishedBuffer, otherPublishedBuffer); + } + + @Test + public void canceledSubscriberDoesNotReturnNewResults() { + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(ByteBuffer.wrap(new byte[0])); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + + subscriber.subscription.cancel(); + subscriber.request(1); + + assertTrue(subscriber.publishedResults.isEmpty()); + } + + // Pending discussions on 
https://github.com/aws/aws-sdk-java-v2/issues/3928 + @Test + public void directBuffersAreCoppiedToNonDirectBuffers() { + byte[] bytes = "Hello World!".getBytes(StandardCharsets.UTF_8); + ByteBuffer buffer = ByteBuffer.allocateDirect(bytes.length) + .put(bytes); + buffer.flip(); + AsyncRequestBody requestBody = ByteBuffersAsyncRequestBody.of(buffer); + + TestSubscriber subscriber = new TestSubscriber(); + requestBody.subscribe(subscriber); + subscriber.request(1); + + ByteBuffer publishedBuffer = subscriber.publishedResults.get(0); + assertFalse(publishedBuffer.isDirect()); + byte[] publishedBytes = new byte[publishedBuffer.remaining()]; + publishedBuffer.get(publishedBytes); + assertArrayEquals(bytes, publishedBytes); + } + + @Test + public void staticOfByteBufferConstructorSetsLengthBasedOnBufferRemaining() { + ByteBuffer bb1 = ByteBuffer.allocate(2); + ByteBuffer bb2 = ByteBuffer.allocate(2); + bb2.position(1); + ByteBuffersAsyncRequestBody body = ByteBuffersAsyncRequestBody.of(bb1, bb2); + assertTrue(body.contentLength().isPresent()); + assertEquals(bb1.remaining() + bb2.remaining(), body.contentLength().get()); + } + + @Test + public void staticFromBytesConstructorSetsLengthBasedOnArrayLength() { + byte[] bytes = new byte[2]; + ByteBuffersAsyncRequestBody body = ByteBuffersAsyncRequestBody.from(bytes); + assertTrue(body.contentLength().isPresent()); + assertEquals(bytes.length, body.contentLength().get()); + } + +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutorTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutorTest.java new file mode 100644 index 000000000000..2df65de46e0b --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/waiters/WaiterExecutorTest.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.waiters; + +import java.util.Arrays; +import java.util.concurrent.atomic.LongAdder; +import org.junit.jupiter.api.Test; +import org.testng.Assert; +import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; +import software.amazon.awssdk.core.waiters.WaiterAcceptor; +import software.amazon.awssdk.core.waiters.WaiterOverrideConfiguration; + +class WaiterExecutorTest { + @Test + void largeMaxAttempts() { + + int expectedAttempts = 10_000; + + WaiterOverrideConfiguration conf = + WaiterOverrideConfiguration.builder() + .maxAttempts(expectedAttempts) + .backoffStrategy(BackoffStrategy.none()) + .build(); + + WaiterExecutor sut = + new WaiterExecutor<>(new WaiterConfiguration(conf), + Arrays.asList( + WaiterAcceptor.retryOnResponseAcceptor(c -> c < expectedAttempts), + WaiterAcceptor.successOnResponseAcceptor(c -> c == expectedAttempts) + )); + + LongAdder attemptCounter = new LongAdder(); + sut.execute(() -> { + attemptCounter.increment(); + return attemptCounter.intValue(); + }); + + Assert.assertEquals(attemptCounter.intValue(), expectedAttempts); + } +} \ No newline at end of file diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 839f2722f80a..4403c74e095d 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 
d260918088dc..8dec73ab5f2e 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT apache-client diff --git a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java index c40d7671f86d..af46691e5a19 100644 --- a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java +++ b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/conn/IdleConnectionReaper.java @@ -16,8 +16,9 @@ package software.amazon.awssdk.http.apache.internal.conn; import java.time.Duration; +import java.util.Collections; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.WeakHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -48,7 +49,7 @@ public final class IdleConnectionReaper { private volatile ReaperTask reaperTask; private IdleConnectionReaper() { - this.connectionManagers = new ConcurrentHashMap<>(); + this.connectionManagers = Collections.synchronizedMap(new WeakHashMap<>()); this.executorServiceSupplier = () -> { ExecutorService e = Executors.newSingleThreadExecutor(r -> { diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index d71e03a1d58f..bdf6f373a95c 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index ee1f50375d89..eb8c08cc618c 100644 --- a/http-clients/netty-nio-client/pom.xml +++ 
b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 @@ -85,6 +85,15 @@ io.netty netty-transport-classes-epoll + + io.netty + netty-resolver + + + io.netty + netty-resolver-dns + true + diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java index 78a3fa80fa87..c12aeab10180 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java @@ -103,6 +103,7 @@ private NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefa .sdkEventLoopGroup(sdkEventLoopGroup) .sslProvider(resolveSslProvider(builder)) .proxyConfiguration(builder.proxyConfiguration) + .useNonBlockingDnsResolver(builder.useNonBlockingDnsResolver) .build(); } @@ -475,6 +476,15 @@ public interface Builder extends SdkAsyncHttpClient.Builder http2ConfigurationBuilderConsumer); + + /** + * Configure whether to use a non-blocking dns resolver or not. False by default, as netty's default dns resolver is + * blocking; it namely calls java.net.InetAddress.getByName. + *

+ * When enabled, a non-blocking dns resolver will be used instead, by modifying netty's bootstrap configuration. + * See https://netty.io/news/2016/05/26/4-1-0-Final.html + */ + Builder useNonBlockingDnsResolver(Boolean useNonBlockingDnsResolver); } /** @@ -492,6 +502,7 @@ private static final class DefaultBuilder implements Builder { private Http2Configuration http2Configuration; private SslProvider sslProvider; private ProxyConfiguration proxyConfiguration; + private Boolean useNonBlockingDnsResolver; private DefaultBuilder() { } @@ -716,6 +727,16 @@ public void setHttp2Configuration(Http2Configuration http2Configuration) { http2Configuration(http2Configuration); } + @Override + public Builder useNonBlockingDnsResolver(Boolean useNonBlockingDnsResolver) { + this.useNonBlockingDnsResolver = useNonBlockingDnsResolver; + return this; + } + + public void setUseNonBlockingDnsResolver(Boolean useNonBlockingDnsResolver) { + useNonBlockingDnsResolver(useNonBlockingDnsResolver); + } + @Override public SdkAsyncHttpClient buildWithDefaults(AttributeMap serviceDefaults) { if (standardOptions.get(SdkHttpConfigurationOption.TLS_NEGOTIATION_TIMEOUT) == null) { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java index abb665f2c39a..254211e9303f 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroup.java @@ -19,11 +19,13 @@ import io.netty.channel.ChannelFactory; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; import java.util.Optional; import 
java.util.concurrent.ThreadFactory; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.http.nio.netty.internal.utils.SocketChannelResolver; +import software.amazon.awssdk.http.nio.netty.internal.utils.ChannelResolver; import software.amazon.awssdk.utils.ThreadFactoryBuilder; import software.amazon.awssdk.utils.Validate; @@ -39,7 +41,8 @@ * *

  • Using {@link #create(EventLoopGroup)} to provide a custom {@link EventLoopGroup}. {@link ChannelFactory} will * be resolved based on the type of {@link EventLoopGroup} provided via - * {@link SocketChannelResolver#resolveSocketChannelFactory(EventLoopGroup)} + * {@link ChannelResolver#resolveSocketChannelFactory(EventLoopGroup)} and + * {@link ChannelResolver#resolveDatagramChannelFactory(EventLoopGroup)} *
  • * *
  • Using {@link #create(EventLoopGroup, ChannelFactory)} to provide a custom {@link EventLoopGroup} and @@ -63,12 +66,14 @@ public final class SdkEventLoopGroup { private final EventLoopGroup eventLoopGroup; private final ChannelFactory channelFactory; + private final ChannelFactory datagramChannelFactory; SdkEventLoopGroup(EventLoopGroup eventLoopGroup, ChannelFactory channelFactory) { Validate.paramNotNull(eventLoopGroup, "eventLoopGroup"); Validate.paramNotNull(channelFactory, "channelFactory"); this.eventLoopGroup = eventLoopGroup; this.channelFactory = channelFactory; + this.datagramChannelFactory = ChannelResolver.resolveDatagramChannelFactory(eventLoopGroup); } /** @@ -76,7 +81,8 @@ public final class SdkEventLoopGroup { */ private SdkEventLoopGroup(DefaultBuilder builder) { this.eventLoopGroup = resolveEventLoopGroup(builder); - this.channelFactory = resolveChannelFactory(); + this.channelFactory = resolveSocketChannelFactory(builder); + this.datagramChannelFactory = resolveDatagramChannelFactory(builder); } /** @@ -93,6 +99,13 @@ public ChannelFactory channelFactory() { return channelFactory; } + /** + * @return the {@link ChannelFactory} for datagram channels to be used with Netty Http Client. + */ + public ChannelFactory datagramChannelFactory() { + return datagramChannelFactory; + } + /** * Creates a new instance of SdkEventLoopGroup with {@link EventLoopGroup} and {@link ChannelFactory} * to be used with {@link NettyNioAsyncHttpClient}. 
@@ -116,7 +129,7 @@ public static SdkEventLoopGroup create(EventLoopGroup eventLoopGroup, ChannelFac * @return a new instance of SdkEventLoopGroup */ public static SdkEventLoopGroup create(EventLoopGroup eventLoopGroup) { - return create(eventLoopGroup, SocketChannelResolver.resolveSocketChannelFactory(eventLoopGroup)); + return create(eventLoopGroup, ChannelResolver.resolveSocketChannelFactory(eventLoopGroup)); } public static Builder builder() { @@ -141,11 +154,22 @@ private EventLoopGroup resolveEventLoopGroup(DefaultBuilder builder) { }*/ } - private ChannelFactory resolveChannelFactory() { - // Currently we only support NioEventLoopGroup + private ChannelFactory resolveSocketChannelFactory(DefaultBuilder builder) { + return builder.channelFactory; + } + + private ChannelFactory resolveDatagramChannelFactory(DefaultBuilder builder) { + return builder.datagramChannelFactory; + } + + private static ChannelFactory defaultSocketChannelFactory() { return NioSocketChannel::new; } + private static ChannelFactory defaultDatagramChannelFactory() { + return NioDatagramChannel::new; + } + /** * A builder for {@link SdkEventLoopGroup}. * @@ -172,6 +196,24 @@ public interface Builder { */ Builder threadFactory(ThreadFactory threadFactory); + /** + * {@link ChannelFactory} to create socket channels used by the {@link EventLoopGroup}. If not set, + * NioSocketChannel is used. + * + * @param channelFactory ChannelFactory to use. + * @return This builder for method chaining. + */ + Builder channelFactory(ChannelFactory channelFactory); + + /** + * {@link ChannelFactory} to create datagram channels used by the {@link EventLoopGroup}. If not set, + * NioDatagramChannel is used. + * + * @param datagramChannelFactory ChannelFactory to use. + * @return This builder for method chaining. 
+ */ + Builder datagramChannelFactory(ChannelFactory datagramChannelFactory); + SdkEventLoopGroup build(); } @@ -179,6 +221,8 @@ private static final class DefaultBuilder implements Builder { private Integer numberOfThreads; private ThreadFactory threadFactory; + private ChannelFactory channelFactory = defaultSocketChannelFactory(); + private ChannelFactory datagramChannelFactory = defaultDatagramChannelFactory(); private DefaultBuilder() { } @@ -203,6 +247,26 @@ public void setThreadFactory(ThreadFactory threadFactory) { threadFactory(threadFactory); } + @Override + public Builder channelFactory(ChannelFactory channelFactory) { + this.channelFactory = channelFactory; + return this; + } + + public void setChannelFactory(ChannelFactory channelFactory) { + channelFactory(channelFactory); + } + + @Override + public Builder datagramChannelFactory(ChannelFactory datagramChannelFactory) { + this.datagramChannelFactory = datagramChannelFactory; + return this; + } + + public void setDatagramChannelFactory(ChannelFactory datagramChannelFactory) { + datagramChannelFactory(datagramChannelFactory); + } + @Override public SdkEventLoopGroup build() { return new SdkEventLoopGroup(this); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java index 1d55e1841aa2..fbd727239239 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java @@ -83,6 +83,7 @@ public void channelCreated(Channel ch) throws Exception { private final ProxyConfiguration proxyConfiguration; private final BootstrapProvider bootstrapProvider; private final SslContextProvider sslContextProvider; + private final Boolean 
useNonBlockingDnsResolver; private AwaitCloseChannelPoolMap(Builder builder, Function createBootStrapProvider) { this.configuration = builder.configuration; @@ -94,6 +95,7 @@ private AwaitCloseChannelPoolMap(Builder builder, Function init(ChannelFactory datagramChannelFactory) { + try { + Class addressResolver = ClassLoaderHelper.loadClass(getAddressResolverGroup(), false, (Class) null); + Class dnsNameResolverBuilder = ClassLoaderHelper.loadClass(getDnsNameResolverBuilder(), false, (Class) null); + + Object dnsResolverObj = dnsNameResolverBuilder.newInstance(); + Method method = dnsResolverObj.getClass().getMethod("channelFactory", ChannelFactory.class); + method.invoke(dnsResolverObj, datagramChannelFactory); + + Object e = addressResolver.getConstructor(dnsNameResolverBuilder).newInstance(dnsResolverObj); + return (AddressResolverGroup) e; + } catch (ClassNotFoundException e) { + throw new IllegalStateException("Cannot find module io.netty.resolver.dns " + + " To use netty non blocking dns," + + " the 'netty-resolver-dns' module from io.netty must be on the class path. 
", e); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException | InstantiationException e) { + throw new IllegalStateException("Failed to create AddressResolverGroup", e); + } + } + + private static String getAddressResolverGroup() { + return "io.netty.resolver.dns.DnsAddressResolverGroup"; + } + + private static String getDnsNameResolverBuilder() { + return "io.netty.resolver.dns.DnsNameResolverBuilder"; + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolver.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolver.java new file mode 100644 index 000000000000..8770d683a679 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolver.java @@ -0,0 +1,112 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelFactory; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.ReflectiveChannelFactory; +import io.netty.channel.epoll.EpollDatagramChannel; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollSocketChannel; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import java.util.HashMap; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.DelegatingEventLoopGroup; + +@SdkInternalApi +public final class ChannelResolver { + + private static final Map KNOWN_EL_GROUPS_SOCKET_CHANNELS = new HashMap<>(); + private static final Map KNOWN_EL_GROUPS_DATAGRAM_CHANNELS = new HashMap<>(); + + static { + KNOWN_EL_GROUPS_SOCKET_CHANNELS.put("io.netty.channel.kqueue.KQueueEventLoopGroup", + "io.netty.channel.kqueue.KQueueSocketChannel"); + KNOWN_EL_GROUPS_SOCKET_CHANNELS.put("io.netty.channel.oio.OioEventLoopGroup", + "io.netty.channel.socket.oio.OioSocketChannel"); + + KNOWN_EL_GROUPS_DATAGRAM_CHANNELS.put("io.netty.channel.kqueue.KQueueEventLoopGroup", + "io.netty.channel.kqueue.KQueueDatagramChannel"); + KNOWN_EL_GROUPS_DATAGRAM_CHANNELS.put("io.netty.channel.oio.OioEventLoopGroup", + "io.netty.channel.socket.oio.OioDatagramChannel"); + } + + private ChannelResolver() { + } + + /** + * Attempts to determine the {@link ChannelFactory} class that corresponds to the given + * event loop group. + * + * @param eventLoopGroup the event loop group to determine the {@link ChannelFactory} for + * @return A {@link ChannelFactory} instance for the given event loop group. 
+ */ + @SuppressWarnings("unchecked") + public static ChannelFactory resolveSocketChannelFactory(EventLoopGroup eventLoopGroup) { + if (eventLoopGroup instanceof DelegatingEventLoopGroup) { + return resolveSocketChannelFactory(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate()); + } + + if (eventLoopGroup instanceof NioEventLoopGroup) { + return NioSocketChannel::new; + } + if (eventLoopGroup instanceof EpollEventLoopGroup) { + return EpollSocketChannel::new; + } + + String socketFqcn = KNOWN_EL_GROUPS_SOCKET_CHANNELS.get(eventLoopGroup.getClass().getName()); + if (socketFqcn == null) { + throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass()); + } + + return invokeSafely(() -> new ReflectiveChannelFactory(Class.forName(socketFqcn))); + } + + /** + * Attempts to determine the {@link ChannelFactory} class for datagram channels that corresponds to the given + * event loop group. + * + * @param eventLoopGroup the event loop group to determine the {@link ChannelFactory} for + * @return A {@link ChannelFactory} instance for the given event loop group. 
+ */ + @SuppressWarnings("unchecked") + public static ChannelFactory resolveDatagramChannelFactory(EventLoopGroup eventLoopGroup) { + if (eventLoopGroup instanceof DelegatingEventLoopGroup) { + return resolveDatagramChannelFactory(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate()); + } + + if (eventLoopGroup instanceof NioEventLoopGroup) { + return NioDatagramChannel::new; + } + if (eventLoopGroup instanceof EpollEventLoopGroup) { + return EpollDatagramChannel::new; + } + + String datagramFqcn = KNOWN_EL_GROUPS_DATAGRAM_CHANNELS.get(eventLoopGroup.getClass().getName()); + if (datagramFqcn == null) { + throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass()); + } + + return invokeSafely(() -> new ReflectiveChannelFactory(Class.forName(datagramFqcn))); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java deleted file mode 100644 index 1d80dad5850f..000000000000 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolver.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.http.nio.netty.internal.utils; - -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelFactory; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.ReflectiveChannelFactory; -import io.netty.channel.epoll.EpollEventLoopGroup; -import io.netty.channel.epoll.EpollSocketChannel; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.http.nio.netty.internal.DelegatingEventLoopGroup; - -@SdkInternalApi -public final class SocketChannelResolver { - - private static final Map KNOWN_EL_GROUPS = new HashMap<>(); - - static { - KNOWN_EL_GROUPS.put("io.netty.channel.kqueue.KQueueEventLoopGroup", "io.netty.channel.kqueue.KQueueSocketChannel"); - KNOWN_EL_GROUPS.put("io.netty.channel.oio.OioEventLoopGroup", "io.netty.channel.socket.oio.OioSocketChannel"); - } - - private SocketChannelResolver() { - } - - /** - * Attempts to determine the {@link ChannelFactory} class that corresponds to the given - * event loop group. - * - * @param eventLoopGroup the event loop group to determine the {@link ChannelFactory} for - * @return A {@link ChannelFactory} instance for the given event loop group. 
- */ - @SuppressWarnings("unchecked") - public static ChannelFactory resolveSocketChannelFactory(EventLoopGroup eventLoopGroup) { - if (eventLoopGroup instanceof DelegatingEventLoopGroup) { - return resolveSocketChannelFactory(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate()); - } - - if (eventLoopGroup instanceof NioEventLoopGroup) { - return NioSocketChannel::new; - } - if (eventLoopGroup instanceof EpollEventLoopGroup) { - return EpollSocketChannel::new; - } - - String socketFqcn = KNOWN_EL_GROUPS.get(eventLoopGroup.getClass().getName()); - if (socketFqcn == null) { - throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass()); - } - - return invokeSafely(() -> new ReflectiveChannelFactory(Class.forName(socketFqcn))); - } -} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java index dc7c408c3c9f..f35c0914609d 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.FileStoreTlsKeyManagersProvider; import software.amazon.awssdk.http.HttpTestUtils; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.TlsKeyManagersProvider; @@ -185,6 +186,24 @@ public void nonProxy_noKeyManagerGiven_shouldThrowException() { .hasRootCauseInstanceOf(SSLException.class); } + @Test + public void builderUsesProvidedKeyManagersProviderNonBlockingDns() { + TlsKeyManagersProvider mockKeyManagersProvider = 
mock(TlsKeyManagersProvider.class); + netty = NettyNioAsyncHttpClient.builder() + .useNonBlockingDnsResolver(true) + .proxyConfiguration(proxyCfg) + .tlsKeyManagersProvider(mockKeyManagersProvider) + .buildWithDefaults(AttributeMap.builder() + .put(TRUST_ALL_CERTIFICATES, true) + .build()); + + try { + sendRequest(netty, new RecordingResponseHandler()); + } catch (Exception ignored) { + } + verify(mockKeyManagersProvider).keyManagers(); + } + private void sendRequest(SdkAsyncHttpClient client, SdkAsyncHttpResponseHandler responseHandler) { AsyncExecuteRequest req = AsyncExecuteRequest.builder() .request(testSdkRequest()) diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientNonBlockingDnsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientNonBlockingDnsTest.java new file mode 100644 index 000000000000..9535c41c2b0a --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientNonBlockingDnsTest.java @@ -0,0 +1,171 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static java.util.Collections.singletonMap; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.commons.lang3.StringUtils.reverse; +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.assertCanReceiveBasicRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createProvider; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.makeSimpleRequest; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.assertj.core.api.Condition; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import 
software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.utils.AttributeMap; + +@RunWith(MockitoJUnitRunner.class) +public class NettyNioAsyncHttpClientNonBlockingDnsTest { + + private final RecordingNetworkTrafficListener wiremockTrafficListener = new RecordingNetworkTrafficListener(); + + private static final SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder() + .useNonBlockingDnsResolver(true) + .buildWithDefaults( + AttributeMap.builder() + .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, true) + .build()); + @Rule + public WireMockRule mockServer = new WireMockRule(wireMockConfig() + .dynamicPort() + .dynamicHttpsPort() + .networkTrafficListener(wiremockTrafficListener)); + + @Before + public void methodSetup() { + wiremockTrafficListener.reset(); + } + + @AfterClass + public static void tearDown() throws Exception { + client.close(); + } + + @Test + public void canSendContentAndGetThatContentBackNonBlockingDns() throws Exception { + String body = randomAlphabetic(50); + stubFor(any(urlEqualTo("/echo?reversed=true")) + .withRequestBody(equalTo(body)) + .willReturn(aResponse().withBody(reverse(body)))); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + SdkHttpRequest request = createRequest(uri, "/echo", body, SdkHttpMethod.POST, singletonMap("reversed", "true")); + + RecordingResponseHandler recorder = new RecordingResponseHandler(); + + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider(body)).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + verify(1, postRequestedFor(urlEqualTo("/echo?reversed=true"))); + + assertThat(recorder.fullResponseAsString()).isEqualTo(reverse(body)); + } + + @Test + public void defaultThreadFactoryUsesHelpfulName() throws Exception { + // Make a request to ensure 
a thread is primed + makeSimpleRequest(client, mockServer); + + String expectedPattern = "aws-java-sdk-NettyEventLoop-\\d+-\\d+"; + assertThat(Thread.getAllStackTraces().keySet()) + .areAtLeast(1, new Condition<>(t -> t.getName().matches(expectedPattern), + "Matches default thread pattern: `%s`", expectedPattern)); + } + + @Test + public void canMakeBasicRequestOverHttp() throws Exception { + String smallBody = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + assertCanReceiveBasicRequest(client, uri, smallBody); + } + + @Test + public void canMakeBasicRequestOverHttps() throws Exception { + String smallBody = randomAlphabetic(10); + URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); + + assertCanReceiveBasicRequest(client, uri, smallBody); + } + + @Test + public void canHandleLargerPayloadsOverHttp() throws Exception { + String largishBody = randomAlphabetic(25000); + + URI uri = URI.create("http://localhost:" + mockServer.port()); + + assertCanReceiveBasicRequest(client, uri, largishBody); + } + + @Test + public void canHandleLargerPayloadsOverHttps() throws Exception { + String largishBody = randomAlphabetic(25000); + + URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); + + assertCanReceiveBasicRequest(client, uri, largishBody); + } + + @Test + public void requestContentOnlyEqualToContentLengthHeaderFromProvider() throws InterruptedException, ExecutionException, TimeoutException, IOException { + final String content = randomAlphabetic(32); + final String streamContent = content + reverse(content); + stubFor(any(urlEqualTo("/echo?reversed=true")) + .withRequestBody(equalTo(content)) + .willReturn(aResponse().withBody(reverse(content)))); + URI uri = URI.create("http://localhost:" + mockServer.port()); + + SdkHttpFullRequest request = createRequest(uri, "/echo", streamContent, SdkHttpMethod.POST, singletonMap("reversed", "true")); + request = 
request.toBuilder().putHeader("Content-Length", Integer.toString(content.length())).build(); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider(streamContent)).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + // HTTP servers will stop processing the request as soon as it reads + // bytes equal to 'Content-Length' so we need to inspect the raw + // traffic to ensure that there wasn't anything after that. + assertThat(wiremockTrafficListener.requests().toString()).endsWith(content); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientTestUtils.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientTestUtils.java new file mode 100644 index 000000000000..04f9a906ee04 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientTestUtils.java @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyMap; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.commons.lang3.StringUtils.isBlank; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.WireMockServer; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkHttpContentPublisher; + +public class NettyNioAsyncHttpClientTestUtils { + + /** + * Make a simple async request and wait for it to finish. + * + * @param client Client to make request with. 
+ */ + public static void makeSimpleRequest(SdkAsyncHttpClient client, WireMockServer mockServer) throws Exception { + String body = randomAlphabetic(10); + URI uri = URI.create("http://localhost:" + mockServer.port()); + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body))); + SdkHttpRequest request = createRequest(uri); + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + recorder.completeFuture.get(5, TimeUnit.SECONDS); + } + + public static SdkHttpContentPublisher createProvider(String body) { + Stream chunks = splitStringBySize(body).stream() + .map(chunk -> ByteBuffer.wrap(chunk.getBytes(UTF_8))); + return new SdkHttpContentPublisher() { + + @Override + public Optional contentLength() { + return Optional.of(Long.valueOf(body.length())); + } + + @Override + public void subscribe(Subscriber s) { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + chunks.forEach(s::onNext); + s.onComplete(); + } + + @Override + public void cancel() { + + } + }); + } + }; + } + + public static SdkHttpFullRequest createRequest(URI uri) { + return createRequest(uri, "/", null, SdkHttpMethod.GET, emptyMap()); + } + + public static SdkHttpFullRequest createRequest(URI uri, + String resourcePath, + String body, + SdkHttpMethod method, + Map params) { + String contentLength = body == null ? 
null : String.valueOf(body.getBytes(UTF_8).length); + return SdkHttpFullRequest.builder() + .uri(uri) + .method(method) + .encodedPath(resourcePath) + .applyMutation(b -> params.forEach(b::putRawQueryParameter)) + .applyMutation(b -> { + b.putHeader("Host", uri.getHost()); + if (contentLength != null) { + b.putHeader("Content-Length", contentLength); + } + }).build(); + } + + public static void assertCanReceiveBasicRequest(SdkAsyncHttpClient client, URI uri, String body) throws Exception { + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withHeader("Some-Header", "With Value").withBody(body))); + + SdkHttpRequest request = createRequest(uri); + + RecordingResponseHandler recorder = new RecordingResponseHandler(); + client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); + + recorder.completeFuture.get(5, TimeUnit.SECONDS); + + assertThat(recorder.responses).hasOnlyOneElementSatisfying( + headerResponse -> { + assertThat(headerResponse.headers()).containsKey("Some-Header"); + assertThat(headerResponse.statusCode()).isEqualTo(200); + }); + + assertThat(recorder.fullResponseAsString()).isEqualTo(body); + verify(1, getRequestedFor(urlMatching("/"))); + } + + private static Collection splitStringBySize(String str) { + if (isBlank(str)) { + return Collections.emptyList(); + } + ArrayList split = new ArrayList<>(); + for (int i = 0; i <= str.length() / 1000; i++) { + split.add(str.substring(i * 1000, Math.min((i + 1) * 1000, str.length()))); + } + return split; + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java index 9a1121e201f5..116119d36ea5 100644 --- 
a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java @@ -18,19 +18,14 @@ import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; import static com.github.tomakehurst.wiremock.client.WireMock.any; import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; -import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; -import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; import static com.github.tomakehurst.wiremock.client.WireMock.verify; import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; -import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.commons.lang3.StringUtils.reverse; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; @@ -40,6 +35,10 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.assertCanReceiveBasicRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createProvider; +import static 
software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.createRequest; +import static software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClientTestUtils.makeSimpleRequest; import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.http.Fault; @@ -49,25 +48,22 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.DatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslProvider; import io.netty.util.AttributeKey; import java.io.IOException; import java.net.URI; -import java.nio.ByteBuffer; import java.time.Duration; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; import javax.net.ssl.TrustManagerFactory; import org.assertj.core.api.Condition; import org.junit.AfterClass; @@ -78,8 +74,6 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.HttpTestUtils; import software.amazon.awssdk.http.SdkHttpConfigurationOption; @@ -88,7 +82,6 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.async.SdkHttpContentPublisher; import 
software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPoolMap; @@ -183,7 +176,8 @@ public void invalidMaxPendingConnectionAcquireConfig_shouldPropagateException() .maxConcurrency(1) .maxPendingConnectionAcquires(0) .build()) { - assertThatThrownBy(() -> makeSimpleRequest(customClient)).hasMessageContaining("java.lang.IllegalArgumentException: maxPendingAcquires: 0 (expected: >= 1)"); + assertThatThrownBy(() -> makeSimpleRequest(customClient, mockServer)).hasMessageContaining("java.lang" + + ".IllegalArgumentException: maxPendingAcquires: 0 (expected: >= 1)"); } } @@ -196,7 +190,7 @@ public void customFactoryIsUsed() throws Exception { .threadFactory(threadFactory)) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); Mockito.verify(threadFactory, atLeastOnce()).newThread(Mockito.any()); @@ -208,7 +202,7 @@ public void openSslBeingUsed() throws Exception { NettyNioAsyncHttpClient.builder() .sslProvider(SslProvider.OPENSSL) .build()) { - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); } } @@ -218,7 +212,7 @@ public void defaultJdkSslProvider() throws Exception { NettyNioAsyncHttpClient.builder() .sslProvider(SslProvider.JDK) .build()) { - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); } } @@ -226,7 +220,7 @@ public void defaultJdkSslProvider() throws Exception { @Test public void defaultThreadFactoryUsesHelpfulName() throws Exception { // Make a request to ensure a thread is primed - makeSimpleRequest(client); + makeSimpleRequest(client, mockServer); String expectedPattern = "aws-java-sdk-NettyEventLoop-\\d+-\\d+"; assertThat(Thread.getAllStackTraces().keySet()) @@ -247,7 +241,7 @@ public void customThreadCountIsRespected() throws Exception { // Have to make enough requests to 
prime the threads for (int i = 0; i < threadCount + 1; i++) { - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); } customClient.close(); @@ -267,7 +261,7 @@ public void customEventLoopGroup_NotClosedWhenClientIsClosed() throws Exception .eventLoopGroup(SdkEventLoopGroup.create(eventLoopGroup, NioSocketChannel::new)) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); Mockito.verify(threadFactory, atLeastOnce()).newThread(Mockito.any()); @@ -287,7 +281,7 @@ public void customChannelFactoryIsUsed() throws Exception { .eventLoopGroup(SdkEventLoopGroup.create(customEventLoopGroup, channelFactory)) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); Mockito.verify(channelFactory, atLeastOnce()).newChannel(); @@ -335,7 +329,7 @@ public void responseConnectionReused_shouldReleaseChannel() throws Exception { .maxConcurrency(1) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); verifyChannelRelease(channel); assertThat(channel.isShutdown()).isFalse(); @@ -446,27 +440,12 @@ public void builderUsesProvidedTrustManagersProvider() throws Exception { } } - /** - * Make a simple async request and wait for it to fiish. - * - * @param client Client to make request with. 
- */ - private void makeSimpleRequest(SdkAsyncHttpClient client) throws Exception { - String body = randomAlphabetic(10); - URI uri = URI.create("http://localhost:" + mockServer.port()); - stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body))); - SdkHttpRequest request = createRequest(uri); - RecordingResponseHandler recorder = new RecordingResponseHandler(); - client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); - recorder.completeFuture.get(5, TimeUnit.SECONDS); - } - @Test public void canMakeBasicRequestOverHttp() throws Exception { String smallBody = randomAlphabetic(10); URI uri = URI.create("http://localhost:" + mockServer.port()); - assertCanReceiveBasicRequest(uri, smallBody); + assertCanReceiveBasicRequest(client, uri, smallBody); } @Test @@ -474,7 +453,7 @@ public void canMakeBasicRequestOverHttps() throws Exception { String smallBody = randomAlphabetic(10); URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); - assertCanReceiveBasicRequest(uri, smallBody); + assertCanReceiveBasicRequest(client, uri, smallBody); } @Test @@ -483,7 +462,7 @@ public void canHandleLargerPayloadsOverHttp() throws Exception { URI uri = URI.create("http://localhost:" + mockServer.port()); - assertCanReceiveBasicRequest(uri, largishBody); + assertCanReceiveBasicRequest(client, uri, largishBody); } @Test @@ -492,7 +471,7 @@ public void canHandleLargerPayloadsOverHttps() throws Exception { URI uri = URI.create("https://localhost:" + mockServer.httpsPort()); - assertCanReceiveBasicRequest(uri, largishBody); + assertCanReceiveBasicRequest(client, uri, largishBody); } @Test @@ -579,88 +558,6 @@ public ChannelFuture close() { assertThat(channelClosedFuture.get(5, TimeUnit.SECONDS)).isTrue(); } - private void assertCanReceiveBasicRequest(URI uri, String body) throws Exception { - stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withHeader("Some-Header", "With 
Value").withBody(body))); - - SdkHttpRequest request = createRequest(uri); - - RecordingResponseHandler recorder = new RecordingResponseHandler(); - client.execute(AsyncExecuteRequest.builder().request(request).requestContentPublisher(createProvider("")).responseHandler(recorder).build()); - - recorder.completeFuture.get(5, TimeUnit.SECONDS); - - assertThat(recorder.responses).hasOnlyOneElementSatisfying( - headerResponse -> { - assertThat(headerResponse.headers()).containsKey("Some-Header"); - assertThat(headerResponse.statusCode()).isEqualTo(200); - }); - - assertThat(recorder.fullResponseAsString()).isEqualTo(body); - verify(1, getRequestedFor(urlMatching("/"))); - } - - private SdkHttpContentPublisher createProvider(String body) { - Stream chunks = splitStringBySize(body).stream() - .map(chunk -> ByteBuffer.wrap(chunk.getBytes(UTF_8))); - return new SdkHttpContentPublisher() { - - @Override - public Optional contentLength() { - return Optional.of(Long.valueOf(body.length())); - } - - @Override - public void subscribe(Subscriber s) { - s.onSubscribe(new Subscription() { - @Override - public void request(long n) { - chunks.forEach(s::onNext); - s.onComplete(); - } - - @Override - public void cancel() { - - } - }); - } - }; - } - - private SdkHttpFullRequest createRequest(URI uri) { - return createRequest(uri, "/", null, SdkHttpMethod.GET, emptyMap()); - } - - private SdkHttpFullRequest createRequest(URI uri, - String resourcePath, - String body, - SdkHttpMethod method, - Map params) { - String contentLength = body == null ? 
null : String.valueOf(body.getBytes(UTF_8).length); - return SdkHttpFullRequest.builder() - .uri(uri) - .method(method) - .encodedPath(resourcePath) - .applyMutation(b -> params.forEach(b::putRawQueryParameter)) - .applyMutation(b -> { - b.putHeader("Host", uri.getHost()); - if (contentLength != null) { - b.putHeader("Content-Length", contentLength); - } - }).build(); - } - - private static Collection splitStringBySize(String str) { - if (isBlank(str)) { - return Collections.emptyList(); - } - ArrayList split = new ArrayList<>(); - for (int i = 0; i <= str.length() / 1000; i++) { - split.add(str.substring(i * 1000, Math.min((i + 1) * 1000, str.length()))); - } - return split; - } - // Needs to be a non-anon class in order to spy public static class CustomThreadFactory implements ThreadFactory { @Override @@ -719,7 +616,7 @@ public void createNettyClient_ReadWriteTimeoutCanBeZero() throws Exception { .writeTimeout(Duration.ZERO) .build(); - makeSimpleRequest(customClient); + makeSimpleRequest(customClient, mockServer); customClient.close(); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java index f797a760fdf7..438d65e1f9fc 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java @@ -126,6 +126,30 @@ public void proxyConfigured_hostInNonProxySet_doesNotConnect() { assertThat(responseHandler.fullResponseAsString()).isEqualTo("hello"); } + @Test + public void proxyConfigured_hostInNonProxySet_nonBlockingDns_doesNotConnect() { + RecordingResponseHandler responseHandler = new RecordingResponseHandler(); + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .request(testSdkRequest()) + .responseHandler(responseHandler) + 
.requestContentPublisher(new EmptyPublisher()) + .build(); + + ProxyConfiguration cfg = proxyCfg.toBuilder() + .nonProxyHosts(Stream.of("localhost").collect(Collectors.toSet())) + .build(); + + client = NettyNioAsyncHttpClient.builder() + .proxyConfiguration(cfg) + .useNonBlockingDnsResolver(true) + .build(); + + client.execute(req).join(); + + responseHandler.completeFuture.join(); + assertThat(responseHandler.fullResponseAsString()).isEqualTo("hello"); + } + private SdkHttpFullRequest testSdkRequest() { return SdkHttpFullRequest.builder() .method(SdkHttpMethod.GET) diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java index a3ae76469359..bb2598345cff 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/SdkEventLoopGroupTest.java @@ -18,8 +18,15 @@ import static org.assertj.core.api.Assertions.assertThat; import io.netty.channel.DefaultEventLoopGroup; +import io.netty.channel.epoll.EpollDatagramChannel; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.epoll.EpollSocketChannel; import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.oio.OioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.channel.socket.oio.OioDatagramChannel; +import io.netty.channel.socket.oio.OioSocketChannel; import org.junit.Test; public class SdkEventLoopGroupTest { @@ -28,13 +35,24 @@ public class SdkEventLoopGroupTest { public void creatingUsingBuilder() { SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.builder().numberOfThreads(1).build(); assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + 
assertThat(sdkEventLoopGroup.datagramChannelFactory()).isNotNull(); assertThat(sdkEventLoopGroup.eventLoopGroup()).isNotNull(); } @Test - public void creatingUsingStaticMethod() { + public void creatingUsingStaticMethod_A() { SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.create(new NioEventLoopGroup(), NioSocketChannel::new); assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory().newChannel()).isInstanceOf(NioDatagramChannel.class); + assertThat(sdkEventLoopGroup.eventLoopGroup()).isNotNull(); + } + + @Test + public void creatingUsingStaticMethod_B() { + SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.create(new OioEventLoopGroup(), OioSocketChannel::new); + assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory().newChannel()).isInstanceOf(OioDatagramChannel.class); assertThat(sdkEventLoopGroup.eventLoopGroup()).isNotNull(); } @@ -43,6 +61,7 @@ public void notProvidingChannelFactory_channelFactoryResolved() { SdkEventLoopGroup sdkEventLoopGroup = SdkEventLoopGroup.create(new NioEventLoopGroup()); assertThat(sdkEventLoopGroup.channelFactory()).isNotNull(); + assertThat(sdkEventLoopGroup.datagramChannelFactory().newChannel()).isInstanceOf(NioDatagramChannel.class); } @Test(expected = IllegalArgumentException.class) diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java index 3b72f71be4db..17289d1ca3b3 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMapTest.java @@ -118,7 +118,7 @@ public void get_callsInjectedBootstrapProviderCorrectly() { channelPoolMap = new AwaitCloseChannelPoolMap(builder, null, bootstrapProvider); channelPoolMap.get(targetUri); - verify(bootstrapProvider).createBootstrap("some-awesome-service-1234.amazonaws.com", 8080); + verify(bootstrapProvider).createBootstrap("some-awesome-service-1234.amazonaws.com", 8080, null); } @Test @@ -151,7 +151,7 @@ public void get_usingProxy_callsInjectedBootstrapProviderCorrectly() { channelPoolMap = new AwaitCloseChannelPoolMap(builder, shouldProxyCache, bootstrapProvider); channelPoolMap.get(targetUri); - verify(bootstrapProvider).createBootstrap("localhost", mockProxy.port()); + verify(bootstrapProvider).createBootstrap("localhost", mockProxy.port(), null); } @Test diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java index 337cb7ba2ec2..914587b85df3 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/BootstrapProviderTest.java @@ -42,7 +42,19 @@ public class BootstrapProviderTest { // connection attempt and not cached between connection attempts. 
@Test public void createBootstrap_usesUnresolvedInetSocketAddress() { - Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, false); + + SocketAddress socketAddress = bootstrap.config().remoteAddress(); + + assertThat(socketAddress).isInstanceOf(InetSocketAddress.class); + InetSocketAddress inetSocketAddress = (InetSocketAddress)socketAddress; + + assertThat(inetSocketAddress.isUnresolved()).isTrue(); + } + + @Test + public void createBootstrapNonBlockingDns_usesUnresolvedInetSocketAddress() { + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, true); SocketAddress socketAddress = bootstrap.config().remoteAddress(); @@ -54,7 +66,7 @@ public void createBootstrap_usesUnresolvedInetSocketAddress() { @Test public void createBootstrap_defaultConfiguration_tcpKeepAliveShouldBeFalse() { - Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + Bootstrap bootstrap = bootstrapProvider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, false); Boolean keepAlive = (Boolean) bootstrap.config().options().get(ChannelOption.SO_KEEPALIVE); assertThat(keepAlive).isFalse(); @@ -70,7 +82,7 @@ public void createBootstrap_tcpKeepAliveTrue_shouldApply() { nettyConfiguration, new SdkChannelOptions()); - Bootstrap bootstrap = provider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443); + Bootstrap bootstrap = provider.createBootstrap("some-awesome-service-1234.amazonaws.com", 443, false); Boolean keepAlive = (Boolean) bootstrap.config().options().get(ChannelOption.SO_KEEPALIVE); assertThat(keepAlive).isTrue(); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/DnsResolverLoaderTest.java 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/DnsResolverLoaderTest.java new file mode 100644 index 000000000000..40db804aacaf --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/DnsResolverLoaderTest.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.channel.epoll.EpollDatagramChannel; +import io.netty.channel.socket.nio.NioDatagramChannel; +import io.netty.channel.socket.oio.OioDatagramChannel; +import io.netty.resolver.dns.DnsAddressResolverGroup; +import org.junit.jupiter.api.Test; + +public class DnsResolverLoaderTest { + + @Test + public void canResolveChannelFactory() { + assertThat(DnsResolverLoader.init(NioDatagramChannel::new)).isInstanceOf(DnsAddressResolverGroup.class); + assertThat(DnsResolverLoader.init(EpollDatagramChannel::new)).isInstanceOf(DnsAddressResolverGroup.class); + assertThat(DnsResolverLoader.init(OioDatagramChannel::new)).isInstanceOf(DnsAddressResolverGroup.class); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolverTest.java similarity index 
70% rename from http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java rename to http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolverTest.java index 472c417d4485..45edd2b81bb1 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/SocketChannelResolverTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/ChannelResolverTest.java @@ -16,39 +16,47 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; import static org.assertj.core.api.Assertions.assertThat; -import static software.amazon.awssdk.http.nio.netty.internal.utils.SocketChannelResolver.resolveSocketChannelFactory; +import static software.amazon.awssdk.http.nio.netty.internal.utils.ChannelResolver.resolveDatagramChannelFactory; +import static software.amazon.awssdk.http.nio.netty.internal.utils.ChannelResolver.resolveSocketChannelFactory; import io.netty.channel.epoll.Epoll; +import io.netty.channel.epoll.EpollDatagramChannel; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.epoll.EpollSocketChannel; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.oio.OioEventLoopGroup; +import io.netty.channel.socket.nio.NioDatagramChannel; import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.channel.socket.oio.OioDatagramChannel; import io.netty.channel.socket.oio.OioSocketChannel; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.Test; import software.amazon.awssdk.http.nio.netty.internal.DelegatingEventLoopGroup; -public class SocketChannelResolverTest { +public class ChannelResolverTest { @Test public void canDetectFactoryForStandardNioEventLoopGroup() { assertThat(resolveSocketChannelFactory(new NioEventLoopGroup()).newChannel()).isInstanceOf(NioSocketChannel.class); + 
assertThat(resolveDatagramChannelFactory(new NioEventLoopGroup()).newChannel()).isInstanceOf(NioDatagramChannel.class); } @Test public void canDetectEpollEventLoopGroupFactory() { Assumptions.assumeTrue(Epoll.isAvailable()); assertThat(resolveSocketChannelFactory(new EpollEventLoopGroup()).newChannel()).isInstanceOf(EpollSocketChannel.class); + assertThat(resolveDatagramChannelFactory(new EpollEventLoopGroup()).newChannel()).isInstanceOf(EpollDatagramChannel.class); } @Test public void worksWithDelegateEventLoopGroupsFactory() { assertThat(resolveSocketChannelFactory(new DelegatingEventLoopGroup(new NioEventLoopGroup()) {}).newChannel()).isInstanceOf(NioSocketChannel.class); + assertThat(resolveDatagramChannelFactory(new DelegatingEventLoopGroup(new NioEventLoopGroup()) {}).newChannel()).isInstanceOf(NioDatagramChannel.class); } @Test public void worksWithOioEventLoopGroupFactory() { assertThat(resolveSocketChannelFactory(new OioEventLoopGroup()).newChannel()).isInstanceOf(OioSocketChannel.class); + assertThat(resolveDatagramChannelFactory(new OioEventLoopGroup()).newChannel()).isInstanceOf(OioDatagramChannel.class); } } diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 7c5d32540aac..bbd349e6e80d 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index afedc11653ef..60e61061fbbe 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index d19d3a34950d..9bf10b0a07df 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ 
b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index 547bd7ae2f00..93d3eafa592b 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index 25f092c33ac7..4e094e34898d 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -90,7 +90,7 @@ ${project.version} - 2.20.82 + 2.20.89 2.13.2 2.13.4.2 2.13.2 diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 28abfade104e..bb792f0e5e61 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index 20b7b81a4f50..7c0e236f6f9a 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java index 45db89f5283c..88cfbe39e82f 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java @@ -43,7 +43,6 @@ import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DocumentAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DoubleAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DurationAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnumAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.IntegerAttributeConverter; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnumAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnumAttributeConverter.java new file mode 100644 index 000000000000..a44a5e2070f0 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnumAttributeConverter.java @@ -0,0 +1,138 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.Validate; + +/** + * A converter between an {@link Enum} and {@link AttributeValue}. + * + *

    + * This stores values in DynamoDB as a string. + * + *

    + * Use EnumAttributeConverter::create in order to use Enum::toString as the enum identifier + * + *

    + * Use EnumAttributeConverter::createWithNameAsKeys in order to use Enum::name as the enum identifier + * + *

    + * This can be created via {@link #create(Class)}. + */ +@SdkPublicApi +public final class EnumAttributeConverter> implements AttributeConverter { + + private final Class enumClass; + private final Map enumValueMap; + + private final Function keyExtractor; + + private EnumAttributeConverter(Class enumClass, Function keyExtractor) { + this.enumClass = enumClass; + this.keyExtractor = keyExtractor; + + Map mutableEnumValueMap = new LinkedHashMap<>(); + Arrays.stream(enumClass.getEnumConstants()) + .forEach(enumConstant -> mutableEnumValueMap.put(keyExtractor.apply(enumConstant), enumConstant)); + + this.enumValueMap = Collections.unmodifiableMap(mutableEnumValueMap); + } + + /** + * Creates an EnumAttributeConverter for an {@link Enum}. + * + *

    + * Uses Enum::toString as the enum identifier. + * + * @param enumClass The enum class to be used + * @return an EnumAttributeConverter + * @param the enum subclass + */ + public static > EnumAttributeConverter create(Class enumClass) { + return new EnumAttributeConverter<>(enumClass, Enum::toString); + } + + /** + * Creates an EnumAttributeConverter for an {@link Enum}. + * + *

    + * Uses Enum::name as the enum identifier. + * + * @param enumClass The enum class to be used + * @return an EnumAttributeConverter + * @param the enum subclass + */ + public static > EnumAttributeConverter createWithNameAsKeys(Class enumClass) { + return new EnumAttributeConverter<>(enumClass, Enum::name); + } + + /** + * Returns the proper {@link AttributeValue} for the given enum type. + * + * @param input the enum type to be converted + * @return AttributeValue + */ + @Override + public AttributeValue transformFrom(T input) { + return AttributeValue.builder().s(keyExtractor.apply(input)).build(); + } + + /** + * Returns the proper enum type for the given {@link AttributeValue} input. + * + * @param input the AttributeValue to be converted + * @return an enum type + */ + @Override + public T transformTo(AttributeValue input) { + Validate.isTrue(input.s() != null, "Cannot convert non-string value to enum."); + T returnValue = enumValueMap.get(input.s()); + + if (returnValue == null) { + throw new IllegalArgumentException(String.format("Unable to convert string value '%s' to enum type '%s'", + input.s(), enumClass)); + } + + return returnValue; + } + + /** + * Returns the {@link EnhancedType} of the converter. + * + * @return EnhancedType + */ + @Override + public EnhancedType type() { + return EnhancedType.of(enumClass); + } + + /** + * Returns the {@link AttributeValueType} of the converter. 
+ * + * @return AttributeValueType + */ + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java index bdf4ee35cbdb..2aa9d100d2c2 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/TableSchema.java @@ -53,6 +53,18 @@ static StaticTableSchema.Builder builder(Class itemClass) { return StaticTableSchema.builder(itemClass); } + /** + * Returns a builder for the {@link StaticTableSchema} implementation of this interface which allows all attributes, + * tags and table structure to be directly declared in the builder. + * + * @param itemType The {@link EnhancedType} of the item this {@link TableSchema} will map records to. + * @param The type of the item this {@link TableSchema} will map records to. + * @return A newly initialized {@link StaticTableSchema.Builder}. + */ + static StaticTableSchema.Builder builder(EnhancedType itemType) { + return StaticTableSchema.builder(itemType); + } + /** * Returns a builder for the {@link StaticImmutableTableSchema} implementation of this interface which allows all * attributes, tags and table structure to be directly declared in the builder. @@ -69,6 +81,22 @@ static StaticImmutableTableSchema.Builder builder(Class immutabl return StaticImmutableTableSchema.builder(immutableItemClass, immutableBuilderClass); } + /** + * Returns a builder for the {@link StaticImmutableTableSchema} implementation of this interface which allows all + * attributes, tags and table structure to be directly declared in the builder. 
+ * + * @param immutableItemType The {@link EnhancedType} of the immutable item this {@link TableSchema} will map records to. + * @param immutableBuilderType The {@link EnhancedType} of the class that can be used to construct immutable items this + * {@link TableSchema} maps records to. + * @param The type of the immutable item this {@link TableSchema} will map records to. + * @param The type of the builder used by this {@link TableSchema} to construct immutable items with. + * @return A newly initialized {@link StaticImmutableTableSchema.Builder} + */ + static StaticImmutableTableSchema.Builder builder(EnhancedType immutableItemType, + EnhancedType immutableBuilderType) { + return StaticImmutableTableSchema.builder(immutableItemType, immutableBuilderType); + } + /** * Scans a bean class that has been annotated with DynamoDb bean annotations and then returns a * {@link BeanTableSchema} implementation of this interface that can map records to and from items of that bean diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java index 6c9f0f69265e..8b6d8412969b 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java @@ -15,13 +15,20 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; +import static java.util.Collections.emptyList; import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.function.Consumer; +import java.util.stream.Collectors; import 
software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; @@ -39,6 +46,8 @@ import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedResponse; import software.amazon.awssdk.enhanced.dynamodb.model.DescribeTableEnhancedResponse; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; import software.amazon.awssdk.enhanced.dynamodb.model.PutItemEnhancedRequest; @@ -51,6 +60,7 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbClient; import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; @SdkInternalApi public class DefaultDynamoDbTable implements DynamoDbTable { @@ -115,7 +125,51 @@ public void createTable(Consumer requestCons @Override public void createTable() { - createTable(CreateTableEnhancedRequest.builder().build()); + Map> indexGroups = splitSecondaryIndicesToLocalAndGlobalOnes(); + createTable(CreateTableEnhancedRequest.builder() + .localSecondaryIndices(extractLocalSecondaryIndices(indexGroups)) + 
.globalSecondaryIndices(extractGlobalSecondaryIndices(indexGroups)) + .build()); + } + + private Map> splitSecondaryIndicesToLocalAndGlobalOnes() { + String primaryPartitionKeyName = tableSchema.tableMetadata().primaryPartitionKey(); + Collection indices = tableSchema.tableMetadata().indices(); + return indices.stream() + .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) + .collect(Collectors.groupingBy(metadata -> { + String partitionKeyName = metadata.partitionKey().map(KeyAttributeMetadata::name).orElse(null); + if (partitionKeyName == null || primaryPartitionKeyName.equals(partitionKeyName)) { + return IndexType.LSI; + } + return IndexType.GSI; + })); + } + + private List extractLocalSecondaryIndices(Map> indicesGroups) { + return indicesGroups.getOrDefault(IndexType.LSI, emptyList()).stream() + .map(this::mapIndexMetadataToEnhancedLocalSecondaryIndex) + .collect(Collectors.toList()); + } + + private EnhancedLocalSecondaryIndex mapIndexMetadataToEnhancedLocalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedLocalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); + } + + private List extractGlobalSecondaryIndices(Map> indicesGroups) { + return indicesGroups.getOrDefault(IndexType.GSI, emptyList()).stream() + .map(this::mapIndexMetadataToEnhancedGlobalSecondaryIndex) + .collect(Collectors.toList()); + } + + private EnhancedGlobalSecondaryIndex mapIndexMetadataToEnhancedGlobalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedGlobalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); } @Override diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/IndexType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/IndexType.java new file mode 
100644 index 000000000000..0fd1fc28cd82 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/IndexType.java @@ -0,0 +1,27 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.client; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Enum collecting types of secondary indexes + */ +@SdkInternalApi +public enum IndexType { + LSI, + GSI +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java deleted file mode 100644 index 18395a82656b..000000000000 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; - -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; -import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.utils.Validate; - -/** - * A converter between an {@link Enum} and {@link AttributeValue}. - * - *

    - * This stores values in DynamoDB as a string. - * - *

    - * This can be created via {@link #create(Class)}. - */ -@SdkInternalApi -public class EnumAttributeConverter> implements AttributeConverter { - - private final Class enumClass; - private final Map enumValueMap; - - private EnumAttributeConverter(Class enumClass) { - this.enumClass = enumClass; - - Map mutableEnumValueMap = new LinkedHashMap<>(); - Arrays.stream(enumClass.getEnumConstants()) - .forEach(enumConstant -> mutableEnumValueMap.put(enumConstant.toString(), enumConstant)); - - this.enumValueMap = Collections.unmodifiableMap(mutableEnumValueMap); - } - - public static > EnumAttributeConverter create(Class enumClass) { - return new EnumAttributeConverter<>(enumClass); - } - - @Override - public AttributeValue transformFrom(T input) { - return AttributeValue.builder().s(input.toString()).build(); - } - - @Override - public T transformTo(AttributeValue input) { - Validate.isTrue(input.s() != null, "Cannot convert non-string value to enum."); - T returnValue = enumValueMap.get(input.s()); - - if (returnValue == null) { - throw new IllegalArgumentException(String.format("Unable to convert string value '%s' to enum type '%s'", - input.s(), enumClass)); - } - - return returnValue; - } - - @Override - public EnhancedType type() { - return EnhancedType.of(enumClass); - } - - @Override - public AttributeValueType attributeValueType() { - return AttributeValueType.S; - } -} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java index 24a144ae08b8..de3887081515 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java @@ -74,7 +74,7 @@ public CreateTableRequest generateRequest(TableSchema tableSchema, List sdkGlobalSecondaryIndices = null; List sdkLocalSecondaryIndices = null; - if (this.request.globalSecondaryIndices() != null) { + if (this.request.globalSecondaryIndices() != null && !this.request.globalSecondaryIndices().isEmpty()) { sdkGlobalSecondaryIndices = this.request.globalSecondaryIndices().stream().map(gsi -> { String indexPartitionKey = tableSchema.tableMetadata().indexPartitionKey(gsi.indexName()); @@ -92,7 +92,7 @@ public CreateTableRequest generateRequest(TableSchema tableSchema, }).collect(Collectors.toList()); } - if (this.request.localSecondaryIndices() != null) { + if (this.request.localSecondaryIndices() != null && !this.request.localSecondaryIndices().isEmpty()) { sdkLocalSecondaryIndices = this.request.localSecondaryIndices().stream().map(lsi -> { Optional indexSortKey = tableSchema.tableMetadata().indexSortKey(lsi.indexName()); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java index d22f87af61a9..d5622bfc6df6 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/ImmutableAttribute.java @@ -91,6 +91,19 @@ public static Builder builder(Class itemClass, return new Builder<>(attributeType); } + /** + * Constructs a new builder for this class using supplied types. + * @param itemType The {@link EnhancedType} of the immutable item that this attribute composes. 
+ * @param builderType The {@link EnhancedType} of the builder for the immutable item that this attribute composes. + * @param attributeType A {@link EnhancedType} that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. + */ + public static Builder builder(EnhancedType itemType, + EnhancedType builderType, + EnhancedType attributeType) { + return new Builder<>(attributeType); + } + /** * Constructs a new builder for this class using supplied types. * @param itemClass The class of the item that this attribute composes. diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java index 2957311d7417..5071869347c8 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java @@ -69,7 +69,17 @@ private StaticAttribute(Builder builder) { * @return A new typed builder for an attribute. */ public static Builder builder(Class itemClass, EnhancedType attributeType) { - return new Builder<>(itemClass, attributeType); + return new Builder<>(EnhancedType.of(itemClass), attributeType); + } + + /** + * Constructs a new builder for this class using supplied types. + * @param itemType The {@link EnhancedType} of the item that this attribute composes. + * @param attributeType A {@link EnhancedType} that represents the type of the value this attribute stores. + * @return A new typed builder for an attribute. 
+ */ + public static Builder builder(EnhancedType itemType, EnhancedType attributeType) { + return new Builder<>(itemType, attributeType); } /** @@ -79,7 +89,7 @@ public static Builder builder(Class itemClass, EnhancedType a * @return A new typed builder for an attribute. */ public static Builder builder(Class itemClass, Class attributeClass) { - return new Builder<>(itemClass, EnhancedType.of(attributeClass)); + return new Builder<>(EnhancedType.of(itemClass), EnhancedType.of(attributeClass)); } /** @@ -146,8 +156,8 @@ ImmutableAttribute toImmutableAttribute() { public static final class Builder { private final ImmutableAttribute.Builder delegateBuilder; - private Builder(Class itemClass, EnhancedType type) { - this.delegateBuilder = ImmutableAttribute.builder(itemClass, itemClass, type); + private Builder(EnhancedType itemType, EnhancedType type) { + this.delegateBuilder = ImmutableAttribute.builder(itemType, itemType, type); } private Builder(ImmutableAttribute.Builder delegateBuilder) { diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java index 5d08ee4a3ae3..ea86ac9fcec4 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchema.java @@ -210,7 +210,7 @@ private StaticImmutableTableSchema(Builder builder) { this.newBuilderSupplier = builder.newBuilderSupplier; this.buildItemFunction = builder.buildItemFunction; this.tableMetadata = tableMetadataBuilder.build(); - this.itemType = EnhancedType.of(builder.itemClass); + this.itemType = builder.itemType; } /** @@ -220,7 +220,18 @@ private StaticImmutableTableSchema(Builder builder) { * @return A 
newly initialized builder */ public static Builder builder(Class itemClass, Class builderClass) { - return new Builder<>(itemClass, builderClass); + return new Builder<>(EnhancedType.of(itemClass), EnhancedType.of(builderClass)); + } + + /** + * Creates a builder for a {@link StaticImmutableTableSchema} typed to specific immutable data item class. + * @param itemType The {@link EnhancedType} of the immutable data item class object that the + * {@link StaticImmutableTableSchema} is to map to. + * @param builderType The builder {@link EnhancedType} that can be used to construct instances of the immutable data item. + * @return A newly initialized builder + */ + public static Builder builder(EnhancedType itemType, EnhancedType builderType) { + return new Builder<>(itemType, builderType); } /** @@ -230,8 +241,8 @@ public static Builder builder(Class itemClass, Class builderC */ @NotThreadSafe public static final class Builder { - private final Class itemClass; - private final Class builderClass; + private final EnhancedType itemType; + private final EnhancedType builderType; private final List> additionalAttributes = new ArrayList<>(); private final List> flattenedMappers = new ArrayList<>(); @@ -242,9 +253,9 @@ public static final class Builder { private List attributeConverterProviders = Collections.singletonList(ConverterProviderResolver.defaultConverterProvider()); - private Builder(Class itemClass, Class builderClass) { - this.itemClass = itemClass; - this.builderClass = builderClass; + private Builder(EnhancedType itemType, EnhancedType builderType) { + this.itemType = itemType; + this.builderType = builderType; } /** @@ -285,7 +296,7 @@ public Builder addAttribute(EnhancedType attributeType, Consumer> immutableAttribute) { ImmutableAttribute.Builder builder = - ImmutableAttribute.builder(itemClass, builderClass, attributeType); + ImmutableAttribute.builder(itemType, builderType, attributeType); immutableAttribute.accept(builder); return 
addAttribute(builder.build()); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java index 5d8dbfd94b76..6dc6b2d4f211 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java @@ -75,7 +75,16 @@ private StaticTableSchema(Builder builder) { * @return A newly initialized builder */ public static Builder builder(Class itemClass) { - return new Builder<>(itemClass); + return new Builder<>(EnhancedType.of(itemClass)); + } + + /** + * Creates a builder for a {@link StaticTableSchema} typed to specific data item class. + * @param itemType The {@link EnhancedType} of the data item class object that the {@link StaticTableSchema} is to map to. 
+ * @return A newly initialized builder + */ + public static Builder builder(EnhancedType itemType) { + return new Builder<>(itemType); } /** @@ -85,11 +94,11 @@ public static Builder builder(Class itemClass) { @NotThreadSafe public static final class Builder { private final StaticImmutableTableSchema.Builder delegateBuilder; - private final Class itemClass; + private final EnhancedType itemType; - private Builder(Class itemClass) { - this.delegateBuilder = StaticImmutableTableSchema.builder(itemClass, itemClass); - this.itemClass = itemClass; + private Builder(EnhancedType itemType) { + this.delegateBuilder = StaticImmutableTableSchema.builder(itemType, itemType); + this.itemType = itemType; } /** @@ -130,7 +139,7 @@ public Builder attributes(Collection> staticAttributes) */ public Builder addAttribute(EnhancedType attributeType, Consumer> staticAttribute) { - StaticAttribute.Builder builder = StaticAttribute.builder(itemClass, attributeType); + StaticAttribute.Builder builder = StaticAttribute.builder(itemType, attributeType); staticAttribute.accept(builder); this.delegateBuilder.addAttribute(builder.build().toImmutableAttribute()); return this; @@ -142,7 +151,7 @@ public Builder addAttribute(EnhancedType attributeType, */ public Builder addAttribute(Class attributeClass, Consumer> staticAttribute) { - StaticAttribute.Builder builder = StaticAttribute.builder(itemClass, attributeClass); + StaticAttribute.Builder builder = StaticAttribute.builder(itemType, EnhancedType.of(attributeClass)); staticAttribute.accept(builder); this.delegateBuilder.addAttribute(builder.build().toImmutableAttribute()); return this; diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java index d42296dfe110..daac73362923 100644 --- 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableSchemaTest.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; import software.amazon.awssdk.enhanced.dynamodb.mapper.ImmutableTableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticImmutableTableSchema; import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.InvalidBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SimpleBean; @@ -33,11 +34,31 @@ public class TableSchemaTest { public ExpectedException exception = ExpectedException.none(); @Test - public void builder_constructsStaticTableSchemaBuilder() { + public void builder_constructsStaticTableSchemaBuilder_fromClass() { StaticTableSchema.Builder builder = TableSchema.builder(FakeItem.class); assertThat(builder).isNotNull(); } + @Test + public void builder_constructsStaticTableSchemaBuilder_fromEnhancedType() { + StaticTableSchema.Builder builder = TableSchema.builder(EnhancedType.of(FakeItem.class)); + assertThat(builder).isNotNull(); + } + + @Test + public void builder_constructsStaticImmutableTableSchemaBuilder_fromClass() { + StaticImmutableTableSchema.Builder builder = + TableSchema.builder(SimpleImmutable.class, SimpleImmutable.Builder.class); + assertThat(builder).isNotNull(); + } + + @Test + public void builder_constructsStaticImmutableTableSchemaBuilder_fromEnhancedType() { + StaticImmutableTableSchema.Builder builder = + TableSchema.builder(EnhancedType.of(SimpleImmutable.class), EnhancedType.of(SimpleImmutable.Builder.class)); + assertThat(builder).isNotNull(); + } + @Test public void fromBean_constructsBeanTableSchema() { BeanTableSchema 
beanBeanTableSchema = TableSchema.fromBean(SimpleBean.class); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnumAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnumAttributeConverterTest.java new file mode 100644 index 000000000000..fe17f3050533 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/EnumAttributeConverterTest.java @@ -0,0 +1,113 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.enhanced.dynamodb.EnumAttributeConverter; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +import static org.assertj.core.api.Assertions.assertThat; + +public class EnumAttributeConverterTest { + + @Test + public void transformFromDefault_returnsToString() { + EnumAttributeConverter vehicleConverter = EnumAttributeConverter.create(Vehicle.class); + AttributeValue attribute = vehicleConverter.transformFrom(Vehicle.TRUCK); + + assertThat(attribute.s()).isEqualTo("TRUCK"); + } + + @Test + public void transformToDefault_returnsEnum() { + EnumAttributeConverter vehicleConverter = EnumAttributeConverter.create(Vehicle.class); + + Vehicle bike = vehicleConverter.transformTo(AttributeValue.fromS("BIKE")); + + assertThat(bike).isEqualTo(Vehicle.BIKE); + } + + @Test + public void transformFromDefault_returnsToString_2() { + EnumAttributeConverter animalConverter = EnumAttributeConverter.create(Animal.class); + AttributeValue attribute = animalConverter.transformFrom(Animal.CAT); + + assertThat(attribute.s()).isEqualTo("I am a Cat!"); + } + + @Test + public void transformToDefault_returnsEnum_2() { + EnumAttributeConverter animalConverter = EnumAttributeConverter.create(Animal.class); + + Animal dog = animalConverter.transformTo(AttributeValue.fromS("I am a Dog!")); + + assertThat(dog).isEqualTo(Animal.DOG); + } + + @Test + public void transformFromWithNames_returnsName() { + EnumAttributeConverter personConverter = EnumAttributeConverter.createWithNameAsKeys(Person.class); + AttributeValue attribute = personConverter.transformFrom(Person.JANE); + + assertThat(attribute.s()).isEqualTo("JANE"); + + assertThat(Person.JANE.toString()).isEqualTo("I am a cool person"); + } + + @Test + public void transformToWithNames_returnsEnum() { + EnumAttributeConverter personConverter = 
EnumAttributeConverter.createWithNameAsKeys(Person.class); + + Person john = personConverter.transformTo(AttributeValue.fromS("JOHN")); + + assertThat(Person.JOHN.toString()).isEqualTo("I am a cool person"); + + assertThat(john).isEqualTo(Person.JOHN); + } + + private static enum Vehicle { + CAR, + BIKE, + TRUCK + } + + private static enum Animal { + DOG, + CAT; + + @Override + public String toString() { + switch (this) { + case DOG: + return "I am a Dog!"; + case CAT: + return "I am a Cat!"; + default: + return null; + } + } + } + + private static enum Person { + JOHN, + JANE; + + @Override + public String toString() { + return "I am a cool person"; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java index b268f2928855..e78c4ea36207 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTableTest.java @@ -16,20 +16,30 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.verify; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import java.util.Iterator; +import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; +import org.mockito.Mockito; import 
org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; @RunWith(MockitoJUnitRunner.class) @@ -113,4 +123,50 @@ public void keyFrom_primaryIndex_partitionAndNullSort() { assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); assertThat(key.sortKeyValue(), is(Optional.empty())); } + + @Test + public void createTable_doesNotTreatPrimaryIndexAsAnyOfSecondaryIndexes() { + DefaultDynamoDbTable dynamoDbMappedIndex = + Mockito.spy(new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItem.getTableSchema(), + "test_table")); + + dynamoDbMappedIndex.createTable(); + + CreateTableEnhancedRequest request = captureCreateTableRequest(dynamoDbMappedIndex); + + assertThat(request.localSecondaryIndices().size(), is(0)); + assertThat(request.globalSecondaryIndices().size(), is(0)); + } + + @Test + public void createTable_groupsSecondaryIndexesExistingInTableSchema() { + DefaultDynamoDbTable dynamoDbMappedIndex = + Mockito.spy(new DefaultDynamoDbTable<>(mockDynamoDbClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table")); + + dynamoDbMappedIndex.createTable(); + + CreateTableEnhancedRequest request = captureCreateTableRequest(dynamoDbMappedIndex); + + assertThat(request.localSecondaryIndices().size(), 
is(1)); + Iterator lsiIterator = request.localSecondaryIndices().iterator(); + assertThat(lsiIterator.next().indexName(), is("lsi_1")); + + assertThat(request.globalSecondaryIndices().size(), is(2)); + List globalIndicesNames = request.globalSecondaryIndices().stream() + .map(EnhancedGlobalSecondaryIndex::indexName) + .collect(Collectors.toList()); + assertThat(globalIndicesNames, containsInAnyOrder("gsi_1", "gsi_2")); + } + + private static CreateTableEnhancedRequest captureCreateTableRequest(DefaultDynamoDbTable index) { + ArgumentCaptor createTableOperationCaptor = + ArgumentCaptor.forClass(CreateTableEnhancedRequest.class); + verify(index).createTable(createTableOperationCaptor.capture()); + return createTableOperationCaptor.getValue(); + } } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java index 2df2c4b052e1..5c1b8b2a4d11 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticImmutableTableSchemaTest.java @@ -59,6 +59,7 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testimmutables.EntityEnvelopeImmutable; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @RunWith(MockitoJUnitRunner.class) @@ -801,6 +802,17 @@ public void itemType_returnsCorrectClass() { assertThat(FakeItem.getTableSchema().itemType(), is(equalTo(EnhancedType.of(FakeItem.class)))); } 
+ @Test + public void itemType_returnsCorrectClassWhenBuiltWithEnhancedType() { + StaticImmutableTableSchema tableSchema = + StaticImmutableTableSchema.builder(EnhancedType.of(FakeMappedItem.class), + EnhancedType.of(FakeMappedItem.Builder.class)) + .newItemBuilder(FakeMappedItem::builder, FakeMappedItem.Builder::build) + .build(); + + assertThat(tableSchema.itemType(), is(equalTo(EnhancedType.of(FakeMappedItem.class)))); + } + @Test public void getTableMetadata_hasCorrectFields() { TableMetadata tableMetadata = FakeItemWithSort.getTableSchema().tableMetadata(); @@ -1538,6 +1550,27 @@ public void noConverterProvider_handlesCorrectly_whenAttributeConvertersAreSuppl assertThat(resultMap.get("aString").s(), is(expectedString)); } + @Test + public void builder_canBuildForGenericClassType() { + StaticImmutableTableSchema, EntityEnvelopeImmutable.Builder> envelopeTableSchema = + StaticImmutableTableSchema.builder(new EnhancedType>() {}, + new EnhancedType>() {}) + .newItemBuilder(EntityEnvelopeImmutable.Builder::new, EntityEnvelopeImmutable.Builder::build) + .addAttribute(String.class, + a -> a.name("entity") + .getter(EntityEnvelopeImmutable::entity) + .setter(EntityEnvelopeImmutable.Builder::setEntity)) + .build(); + + EntityEnvelopeImmutable testEnvelope = new EntityEnvelopeImmutable<>("test-value"); + + Map expectedMap = + Collections.singletonMap("entity", AttributeValue.fromS("test-value")); + + assertThat(envelopeTableSchema.itemToMap(testEnvelope, false), equalTo(expectedMap)); + assertThat(envelopeTableSchema.mapToItem(expectedMap).entity(), equalTo("test-value")); + } + private void verifyAttribute(EnhancedType attributeType, Consumer> staticAttribute, FakeMappedItem fakeMappedItem, diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java index 
fc43da907b08..368ef26b9648 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java @@ -57,6 +57,7 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemComposedClass; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EntityEnvelopeBean; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @RunWith(MockitoJUnitRunner.class) @@ -799,6 +800,16 @@ public void itemType_returnsCorrectClass() { assertThat(FakeItem.getTableSchema().itemType(), is(equalTo(EnhancedType.of(FakeItem.class)))); } + @Test + public void itemType_returnsCorrectClassWhenBuiltWithEnhancedType() { + StaticTableSchema tableSchema = StaticTableSchema.builder(EnhancedType.of(FakeMappedItem.class)) + .newItemSupplier(FakeMappedItem::new) + .attributes(ATTRIBUTES) + .build(); + + assertThat(tableSchema.itemType(), is(equalTo(EnhancedType.of(FakeMappedItem.class)))); + } + @Test public void getTableMetadata_hasCorrectFields() { TableMetadata tableMetadata = FakeItemWithSort.getTableSchema().tableMetadata(); @@ -1485,6 +1496,27 @@ public void noConverterProvider_handlesCorrectly_whenAttributeConvertersAreSuppl assertThat(resultMap.get("aString").s(), is(expectedString)); } + @Test + public void builder_canBuildForGenericClassType() { + StaticTableSchema> envelopeTableSchema = + StaticTableSchema.builder(new EnhancedType>() {}) + .newItemSupplier(EntityEnvelopeBean::new) + .addAttribute(String.class, + a -> a.name("entity") + .getter(EntityEnvelopeBean::getEntity) + .setter(EntityEnvelopeBean::setEntity)) + .build(); + + EntityEnvelopeBean testEnvelope = new 
EntityEnvelopeBean<>(); + testEnvelope.setEntity("test-value"); + + Map expectedMap = + Collections.singletonMap("entity", AttributeValue.fromS("test-value")); + + assertThat(envelopeTableSchema.itemToMap(testEnvelope, false), equalTo(expectedMap)); + assertThat(envelopeTableSchema.mapToItem(expectedMap).getEntity(), equalTo("test-value")); + } + private void verifyAttribute(EnhancedType attributeType, Consumer> staticAttribute, FakeMappedItem fakeMappedItem, diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EntityEnvelopeBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EntityEnvelopeBean.java new file mode 100644 index 000000000000..5097ae8d6747 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EntityEnvelopeBean.java @@ -0,0 +1,28 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +public class EntityEnvelopeBean { + private T entity; + + public T getEntity() { + return this.entity; + } + + public void setEntity(T entity) { + this.entity = entity; + } +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testimmutables/EntityEnvelopeImmutable.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testimmutables/EntityEnvelopeImmutable.java new file mode 100644 index 000000000000..8be0b00f0d70 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testimmutables/EntityEnvelopeImmutable.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testimmutables; + +public class EntityEnvelopeImmutable { + private final T entity; + + public EntityEnvelopeImmutable(T entity) { + this.entity = entity; + } + + public T entity() { + return this.entity; + } + + public static class Builder { + private T entity; + + public void setEntity(T entity) { + this.entity = entity; + } + + public EntityEnvelopeImmutable build() { + return new EntityEnvelopeImmutable<>(this.entity); + } + } +} + diff --git a/services-custom/pom.xml b/services-custom/pom.xml index e407bae2692b..51847326b4ba 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 4feab92315f7..83edd96d9545 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java index 28f054ebd02a..94aaaf03b6dc 100644 --- a/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java +++ b/services-custom/s3-transfer-manager/src/it/java/software/amazon/awssdk/transfer/s3/S3IntegrationTestBase.java @@ -118,7 +118,7 @@ private static void createBucket(String bucketName, int retryCount) { if (e.awsErrorDetails().errorCode().equals("BucketAlreadyOwnedByYou")) { System.err.printf("%s bucket already exists, likely leaked by a previous run\n", bucketName); } else if 
(e.awsErrorDetails().errorCode().equals("TooManyBuckets")) { - System.err.println("Printing all buckets for debug:"); + System.err.println("Error: TooManyBuckets. Printing all buckets for debug:"); s3.listBuckets().buckets().forEach(System.err::println); if (retryCount < 2) { System.err.println("Retrying..."); diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index e77ac9e63b19..bc1ce7c6e98f 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/account/pom.xml b/services/account/pom.xml index 25ac6295457a..fd2b2bb74b6f 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json index bcb16c007371..e9b05bd2c3f8 100644 --- a/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/account/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,208 +138,40 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - 
"rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://account.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "account", + "signingRegion": "us-east-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": 
"https://account.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -357,208 +189,40 @@ }, "aws-cn" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + 
false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://account.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://account.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "account", + "signingRegion": "cn-northwest-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://account.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -762,60 +426,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://account.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://account.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - 
"signingName": "account", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/account/src/main/resources/codegen-resources/endpoint-tests.json b/services/account/src/main/resources/codegen-resources/endpoint-tests.json index b1e600ee2323..ac318cb0f9c7 100644 --- a/services/account/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/account/src/main/resources/codegen-resources/endpoint-tests.json @@ -218,6 +218,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -231,6 +242,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -244,6 +266,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -257,6 +290,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": 
{ + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -320,6 +364,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/account/src/main/resources/codegen-resources/paginators-1.json b/services/account/src/main/resources/codegen-resources/paginators-1.json index cdd3aae8c98f..5e75ec80cb62 100644 --- a/services/account/src/main/resources/codegen-resources/paginators-1.json +++ b/services/account/src/main/resources/codegen-resources/paginators-1.json @@ -3,7 +3,8 @@ "ListRegions": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Regions" } } } diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 5801bc8ffccc..a1ee9f85cdfa 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 2793012f5529..edbf08db7911 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json index 6979e0ffff82..ad3be5fe1b49 100644 --- a/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/acmpca/src/main/resources/codegen-resources/endpoint-rule-set.json @@ 
-3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,199 +111,263 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": 
"PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsDualStack" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://acm-pca-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": 
[ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://acm-pca.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://acm-pca.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://acm-pca-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + 
"type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://acm-pca.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://acm-pca.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://acm-pca.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json b/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json index 0fda4c37f98a..d1aad24c9dda 100644 --- a/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/acmpca/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,980 +1,31 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://acm-pca-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": 
"eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 
with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://acm-pca.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://acm-pca.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-1.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 
with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://acm-pca-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - 
"UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region 
ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-northeast-3.amazonaws.com" + "url": "https://acm-pca.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-northeast-2.amazonaws.com" + "url": "https://acm-pca.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region 
ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", "UseDualStack": false } }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -983,680 +34,524 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-south-1.api.aws" - } - }, - "params": { "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.sa-east-1.api.aws" 
- } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.sa-east-1.amazonaws.com" + "url": "https://acm-pca.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with 
FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-east-1.amazonaws.com" + "url": "https://acm-pca.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.cn-north-1.amazonaws.com.cn" + "url": "https://acm-pca.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "cn-north-1", + "Region": "ap-south-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://acm-pca.ap-southeast-1.amazonaws.com" } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.cn-north-1.amazonaws.com.cn" + "url": "https://acm-pca.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://acm-pca-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-gov-west-1.amazonaws.com" + "url": "https://acm-pca.ap-southeast-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "ap-southeast-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-gov-west-1.api.aws" + "url": "https://acm-pca.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-gov-west-1.amazonaws.com" + "url": "https://acm-pca-fips.ca-central-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-gov-west-1", + "Region": "ca-central-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-1.api.aws" + "url": "https://acm-pca.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": 
"eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-1.amazonaws.com" + "url": "https://acm-pca.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-1.api.aws" + "url": "https://acm-pca.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-1.amazonaws.com" + "url": "https://acm-pca.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-2.api.aws" + "url": "https://acm-pca.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 
with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-2.amazonaws.com" + "url": "https://acm-pca.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-2.api.aws" + "url": "https://acm-pca.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-2.amazonaws.com" + "url": "https://acm-pca.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://acm-pca.us-east-1.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-iso-east-1.c2s.ic.gov" + "url": 
"https://acm-pca-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://acm-pca.us-east-2.amazonaws.com" + } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-iso-east-1.c2s.ic.gov" + "url": "https://acm-pca-fips.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-iso-east-1", + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-3.api.aws" + "url": "https://acm-pca.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-3.amazonaws.com" + "url": "https://acm-pca-fips.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": true, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - 
"documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-3.api.aws" + "url": "https://acm-pca.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-3.amazonaws.com" + "url": "https://acm-pca-fips.us-west-2.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-3", + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-4.api.aws" + "url": "https://acm-pca-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.ap-southeast-4.amazonaws.com" + "url": "https://acm-pca.us-east-1.api.aws" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://acm-pca.ap-southeast-4.api.aws" + "url": "https://acm-pca-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.ap-southeast-4.amazonaws.com" + "url": "https://acm-pca-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-1.api.aws" + "url": "https://acm-pca.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-1.amazonaws.com" + "url": "https://acm-pca.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-1.api.aws" + "url": "https://acm-pca.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-1", - 
"UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-1.amazonaws.com" + "url": "https://acm-pca.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-2.api.aws" + "url": "https://acm-pca.us-gov-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.us-east-2.amazonaws.com" + "url": "https://acm-pca.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://acm-pca.us-east-2.api.aws" + "url": "https://acm-pca-fips.us-gov-east-1.api.aws" } }, "params": { - "UseFIPS": false, - "Region": "us-east-2", + "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", 
"expect": { "endpoint": { - "url": "https://acm-pca.us-east-2.amazonaws.com" + "url": "https://acm-pca.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://acm-pca-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://acm-pca-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://acm-pca.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://acm-pca.cn-northwest-1.amazonaws.com.cn" + "url": 
"https://acm-pca.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": false } }, @@ -1666,8 +561,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1679,8 +574,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1690,8 +585,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -1703,21 +598,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1728,8 +636,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1740,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { 
+ "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/acmpca/src/main/resources/codegen-resources/service-2.json b/services/acmpca/src/main/resources/codegen-resources/service-2.json index ee020a89da80..71e71b0a92bc 100644 --- a/services/acmpca/src/main/resources/codegen-resources/service-2.json +++ b/services/acmpca/src/main/resources/codegen-resources/service-2.json @@ -810,7 +810,7 @@ }, "KeyStorageSecurityStandard":{ "shape":"KeyStorageSecurityStandard", - "documentation":"

    Specifies a cryptographic key management compliance standard used for handling CA keys.

    Default: FIPS_140_2_LEVEL_3_OR_HIGHER

    Note: FIPS_140_2_LEVEL_3_OR_HIGHER is not supported in the following Regions:

    • ap-northeast-3

    • ap-southeast-3

    When creating a CA in these Regions, you must provide FIPS_140_2_LEVEL_2_OR_HIGHER as the argument for KeyStorageSecurityStandard. Failure to do this results in an InvalidArgsException with the message, \"A certificate authority cannot be created in this region with the specified security standard.\"

    " + "documentation":"

    Specifies a cryptographic key management compliance standard used for handling CA keys.

    Default: FIPS_140_2_LEVEL_3_OR_HIGHER

    Some Amazon Web Services Regions do not support the default. When creating a CA in these Regions, you must provide FIPS_140_2_LEVEL_2_OR_HIGHER as the argument for KeyStorageSecurityStandard. Failure to do this results in an InvalidArgsException with the message, \"A certificate authority cannot be created in this region with the specified security standard.\"

    For information about security standard support in various Regions, see Storage and security compliance of Amazon Web Services Private CA private keys.

    " }, "Tags":{ "shape":"TagList", @@ -1390,7 +1390,7 @@ }, "SigningAlgorithm":{ "shape":"SigningAlgorithm", - "documentation":"

    The name of the algorithm that will be used to sign the certificate to be issued.

    This parameter should not be confused with the SigningAlgorithm parameter used to sign a CSR in the CreateCertificateAuthority action.

    The specified signing algorithm family (RSA or ECDSA) much match the algorithm family of the CA's secret key.

    " + "documentation":"

    The name of the algorithm that will be used to sign the certificate to be issued.

    This parameter should not be confused with the SigningAlgorithm parameter used to sign a CSR in the CreateCertificateAuthority action.

    The specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key.

    " }, "TemplateArn":{ "shape":"Arn", @@ -1402,7 +1402,7 @@ }, "ValidityNotBefore":{ "shape":"Validity", - "documentation":"

    Information describing the start of the validity period of the certificate. This parameter sets the “Not Before\" date for the certificate.

    By default, when issuing a certificate, Amazon Web Services Private CA sets the \"Not Before\" date to the issuance time minus 60 minutes. This compensates for clock inconsistencies across computer systems. The ValidityNotBefore parameter can be used to customize the “Not Before” value.

    Unlike the Validity parameter, the ValidityNotBefore parameter is optional.

    The ValidityNotBefore value is expressed as an explicit date and time, using the Validity type value ABSOLUTE. For more information, see Validity in this API reference and Validity in RFC 5280.

    " + "documentation":"

    Information describing the start of the validity period of the certificate. This parameter sets the “Not Before\" date for the certificate.

    By default, when issuing a certificate, Amazon Web Services Private CA sets the \"Not Before\" date to the issuance time minus 60 minutes. This compensates for clock inconsistencies across computer systems. The ValidityNotBefore parameter can be used to customize the “Not Before” value.

    Unlike the Validity parameter, the ValidityNotBefore parameter is optional.

    The ValidityNotBefore value is expressed as an explicit date and time, using the Validity type value ABSOLUTE. For more information, see Validity in this API reference and Validity in RFC 5280.

    " }, "IdempotencyToken":{ "shape":"IdempotencyToken", diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index e1f99e2348f2..5d8f94d02bc9 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 3ec7a02a34c6..9661e89db2b3 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 900756f072cb..a99e28f5513e 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 308341636025..842ad1b919f7 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index 3c1baaff10ba..2f8ac2126113 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json index 5389e0b4f801..236a7ed1983c 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json +++ 
b/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "sa-east-1", - 
"UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": 
false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -371,8 +371,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -384,8 +384,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -395,8 +395,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -408,8 +408,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -419,8 +419,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -432,8 +432,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -443,8 +443,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -456,8 +456,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -469,8 +469,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -482,8 +482,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -494,8 +494,8 @@ }, "params": { "Region": "us-east-1", - 
"UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -506,8 +506,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json index c3557207f8f0..52f5e312e195 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/paginators-1.json @@ -15,6 +15,12 @@ "output_token": "nextToken", "result_key": "entities" }, + "ListCodegenJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "entities" + }, "ListComponents": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json index 43019cf40016..a0b39beb017d 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json @@ -45,7 +45,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Creates a new form for an Amplify app.

    ", + "documentation":"

    Creates a new form for an Amplify app.

    ", "idempotent":true }, "CreateTheme":{ @@ -173,6 +173,23 @@ ], "documentation":"

    Exports theme configurations to code that is ready to integrate into an Amplify app.

    " }, + "GetCodegenJob":{ + "name":"GetCodegenJob", + "http":{ + "method":"GET", + "requestUri":"/app/{appId}/environment/{environmentName}/codegen-jobs/{id}", + "responseCode":200 + }, + "input":{"shape":"GetCodegenJobRequest"}, + "output":{"shape":"GetCodegenJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns an existing code generation job.

    " + }, "GetComponent":{ "name":"GetComponent", "http":{ @@ -236,6 +253,22 @@ ], "documentation":"

    Returns an existing theme for an Amplify app.

    " }, + "ListCodegenJobs":{ + "name":"ListCodegenJobs", + "http":{ + "method":"GET", + "requestUri":"/app/{appId}/environment/{environmentName}/codegen-jobs", + "responseCode":200 + }, + "input":{"shape":"ListCodegenJobsRequest"}, + "output":{"shape":"ListCodegenJobsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Retrieves a list of code generation jobs for a specified Amplify app and backend environment.

    " + }, "ListComponents":{ "name":"ListComponents", "http":{ @@ -309,6 +342,22 @@ ], "documentation":"

    Refreshes a previously issued access token that might have expired.

    " }, + "StartCodegenJob":{ + "name":"StartCodegenJob", + "http":{ + "method":"POST", + "requestUri":"/app/{appId}/environment/{environmentName}/codegen-jobs", + "responseCode":200 + }, + "input":{"shape":"StartCodegenJobRequest"}, + "output":{"shape":"StartCodegenJobResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Starts a code generation job for a specified Amplify app and backend environment.

    " + }, "UpdateComponent":{ "name":"UpdateComponent", "http":{ @@ -404,10 +453,366 @@ }, "documentation":"

    Represents the event action configuration for an element of a Component or ComponentChild. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components. ActionParameters defines the action that is performed when an event occurs on the component.

    " }, + "AppId":{ + "type":"string", + "max":20, + "min":1, + "pattern":"d[a-z0-9]+" + }, + "AssociatedFieldsList":{ + "type":"list", + "member":{"shape":"String"} + }, "Boolean":{ "type":"boolean", "box":true }, + "CodegenFeatureFlags":{ + "type":"structure", + "members":{ + "isRelationshipSupported":{ + "shape":"Boolean", + "documentation":"

    Specifies whether a code generation job supports data relationships.

    " + }, + "isNonModelSupported":{ + "shape":"Boolean", + "documentation":"

    Specifies whether a code generation job supports non models.

    " + } + }, + "documentation":"

    Describes the feature flags that you can specify for a code generation job.

    " + }, + "CodegenGenericDataEnum":{ + "type":"structure", + "required":["values"], + "members":{ + "values":{ + "shape":"CodegenGenericDataEnumValuesList", + "documentation":"

    The list of enum values in the generic data schema.

    " + } + }, + "documentation":"

    Describes the enums in a generic data schema.

    " + }, + "CodegenGenericDataEnumValuesList":{ + "type":"list", + "member":{"shape":"String"} + }, + "CodegenGenericDataEnums":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataEnum"} + }, + "CodegenGenericDataField":{ + "type":"structure", + "required":[ + "dataType", + "dataTypeValue", + "required", + "readOnly", + "isArray" + ], + "members":{ + "dataType":{ + "shape":"CodegenGenericDataFieldDataType", + "documentation":"

    The data type for the generic data field.

    " + }, + "dataTypeValue":{ + "shape":"String", + "documentation":"

    The value of the data type for the generic data field.

    " + }, + "required":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the generic data field is required.

    " + }, + "readOnly":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the generic data field is read-only.

    " + }, + "isArray":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the generic data field is an array.

    " + }, + "relationship":{ + "shape":"CodegenGenericDataRelationshipType", + "documentation":"

    The relationship of the generic data schema.

    " + } + }, + "documentation":"

    Describes a field in a generic data schema.

    " + }, + "CodegenGenericDataFieldDataType":{ + "type":"string", + "enum":[ + "ID", + "String", + "Int", + "Float", + "AWSDate", + "AWSTime", + "AWSDateTime", + "AWSTimestamp", + "AWSEmail", + "AWSURL", + "AWSIPAddress", + "Boolean", + "AWSJSON", + "AWSPhone", + "Enum", + "Model", + "NonModel" + ] + }, + "CodegenGenericDataFields":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataField"} + }, + "CodegenGenericDataModel":{ + "type":"structure", + "required":[ + "fields", + "primaryKeys" + ], + "members":{ + "fields":{ + "shape":"CodegenGenericDataFields", + "documentation":"

    The fields in the generic data model.

    " + }, + "isJoinTable":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the generic data model is a join table.

    " + }, + "primaryKeys":{ + "shape":"CodegenPrimaryKeysList", + "documentation":"

    The primary keys of the generic data model.

    " + } + }, + "documentation":"

    Describes a model in a generic data schema.

    " + }, + "CodegenGenericDataModels":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataModel"} + }, + "CodegenGenericDataNonModel":{ + "type":"structure", + "required":["fields"], + "members":{ + "fields":{ + "shape":"CodegenGenericDataNonModelFields", + "documentation":"

    The fields in a generic data schema non model.

    " + } + }, + "documentation":"

    Describes a non-model in a generic data schema.

    " + }, + "CodegenGenericDataNonModelFields":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataField"} + }, + "CodegenGenericDataNonModels":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"CodegenGenericDataNonModel"} + }, + "CodegenGenericDataRelationshipType":{ + "type":"structure", + "required":[ + "type", + "relatedModelName" + ], + "members":{ + "type":{ + "shape":"GenericDataRelationshipType", + "documentation":"

    The data relationship type.

    " + }, + "relatedModelName":{ + "shape":"String", + "documentation":"

    The name of the related model in the data relationship.

    " + }, + "relatedModelFields":{ + "shape":"RelatedModelFieldsList", + "documentation":"

    The related model fields in the data relationship.

    " + }, + "canUnlinkAssociatedModel":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the relationship can unlink the associated model.

    " + }, + "relatedJoinFieldName":{ + "shape":"String", + "documentation":"

    The name of the related join field in the data relationship.

    " + }, + "relatedJoinTableName":{ + "shape":"String", + "documentation":"

    The name of the related join table in the data relationship.

    " + }, + "belongsToFieldOnRelatedModel":{ + "shape":"String", + "documentation":"

    The value of the belongsTo field on the related data model.

    " + }, + "associatedFields":{ + "shape":"AssociatedFieldsList", + "documentation":"

    The associated fields of the data relationship.

    " + }, + "isHasManyIndex":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the @index directive is supported for a hasMany data relationship.

    " + } + }, + "documentation":"

    Describes the relationship between generic data models.

    " + }, + "CodegenJob":{ + "type":"structure", + "required":[ + "id", + "appId", + "environmentName" + ], + "members":{ + "id":{ + "shape":"Uuid", + "documentation":"

    The unique ID for the code generation job.

    " + }, + "appId":{ + "shape":"AppId", + "documentation":"

    The ID of the Amplify app associated with the code generation job.

    " + }, + "environmentName":{ + "shape":"String", + "documentation":"

    The name of the backend environment associated with the code generation job.

    " + }, + "renderConfig":{"shape":"CodegenJobRenderConfig"}, + "genericDataSchema":{"shape":"CodegenJobGenericDataSchema"}, + "autoGenerateForms":{ + "shape":"Boolean", + "documentation":"

    Specifies whether to autogenerate forms in the code generation job.

    " + }, + "features":{"shape":"CodegenFeatureFlags"}, + "status":{ + "shape":"CodegenJobStatus", + "documentation":"

    The status of the code generation job.

    " + }, + "statusMessage":{ + "shape":"String", + "documentation":"

    The customized status message for the code generation job.

    " + }, + "asset":{ + "shape":"CodegenJobAsset", + "documentation":"

    The CodegenJobAsset to use for the code generation job.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    One or more key-value pairs to use when tagging the code generation job.

    " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time that the code generation job was created.

    " + }, + "modifiedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time that the code generation job was modified.

    " + } + }, + "documentation":"

    Describes the configuration for a code generation job that is associated with an Amplify app.

    " + }, + "CodegenJobAsset":{ + "type":"structure", + "members":{ + "downloadUrl":{ + "shape":"String", + "documentation":"

    The URL to use to access the asset.

    " + } + }, + "documentation":"

    Describes an asset for a code generation job.

    " + }, + "CodegenJobGenericDataSchema":{ + "type":"structure", + "required":[ + "dataSourceType", + "models", + "enums", + "nonModels" + ], + "members":{ + "dataSourceType":{ + "shape":"CodegenJobGenericDataSourceType", + "documentation":"

    The type of the data source for the schema. Currently, the only valid value is an Amplify DataStore.

    " + }, + "models":{ + "shape":"CodegenGenericDataModels", + "documentation":"

    The name of a CodegenGenericDataModel.

    " + }, + "enums":{ + "shape":"CodegenGenericDataEnums", + "documentation":"

    The name of a CodegenGenericDataEnum.

    " + }, + "nonModels":{ + "shape":"CodegenGenericDataNonModels", + "documentation":"

    The name of a CodegenGenericDataNonModel.

    " + } + }, + "documentation":"

    Describes the data schema for a code generation job.

    " + }, + "CodegenJobGenericDataSourceType":{ + "type":"string", + "enum":["DataStore"] + }, + "CodegenJobRenderConfig":{ + "type":"structure", + "members":{ + "react":{ + "shape":"ReactStartCodegenJobData", + "documentation":"

    The name of the ReactStartCodegenJobData object.

    " + } + }, + "documentation":"

    Describes the configuration information for rendering the UI component associated with the code generation job.

    ", + "union":true + }, + "CodegenJobStatus":{ + "type":"string", + "enum":[ + "in_progress", + "failed", + "succeeded" + ] + }, + "CodegenJobSummary":{ + "type":"structure", + "required":[ + "appId", + "environmentName", + "id" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

    The unique ID of the Amplify app associated with the code generation job.

    " + }, + "environmentName":{ + "shape":"String", + "documentation":"

    The name of the backend environment associated with the code generation job.

    " + }, + "id":{ + "shape":"Uuid", + "documentation":"

    The unique ID for the code generation job summary.

    " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time that the code generation job summary was created.

    " + }, + "modifiedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The time that the code generation job summary was modified.

    " + } + }, + "documentation":"

    A summary of the basic information about the code generation job.

    " + }, + "CodegenJobSummaryList":{ + "type":"list", + "member":{"shape":"CodegenJobSummary"} + }, + "CodegenPrimaryKeysList":{ + "type":"list", + "member":{"shape":"String"} + }, "Component":{ "type":"structure", "required":[ @@ -1913,6 +2318,52 @@ "type":"list", "member":{"shape":"FormSummary"} }, + "GenericDataRelationshipType":{ + "type":"string", + "enum":[ + "HAS_MANY", + "HAS_ONE", + "BELONGS_TO" + ] + }, + "GetCodegenJobRequest":{ + "type":"structure", + "required":[ + "appId", + "environmentName", + "id" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

    The unique ID of the Amplify app associated with the code generation job.

    ", + "location":"uri", + "locationName":"appId" + }, + "environmentName":{ + "shape":"String", + "documentation":"

    The name of the backend environment that is a part of the Amplify app associated with the code generation job.

    ", + "location":"uri", + "locationName":"environmentName" + }, + "id":{ + "shape":"Uuid", + "documentation":"

    The unique ID of the code generation job.

    ", + "location":"uri", + "locationName":"id" + } + } + }, + "GetCodegenJobResponse":{ + "type":"structure", + "members":{ + "job":{ + "shape":"CodegenJob", + "documentation":"

    The configuration settings for the code generation job.

    " + } + }, + "payload":"job" + }, "GetComponentRequest":{ "type":"structure", "required":[ @@ -2088,6 +2539,28 @@ }, "exception":true }, + "JSModule":{ + "type":"string", + "enum":[ + "es2020", + "esnext" + ] + }, + "JSScript":{ + "type":"string", + "enum":[ + "jsx", + "tsx", + "js" + ] + }, + "JSTarget":{ + "type":"string", + "enum":[ + "es2015", + "es2020" + ] + }, "LabelDecorator":{ "type":"string", "enum":[ @@ -2096,6 +2569,59 @@ "none" ] }, + "ListCodegenJobsLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCodegenJobsRequest":{ + "type":"structure", + "required":[ + "appId", + "environmentName" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

    The unique ID for the Amplify app.

    ", + "location":"uri", + "locationName":"appId" + }, + "environmentName":{ + "shape":"String", + "documentation":"

    The name of the backend environment that is a part of the Amplify app.

    ", + "location":"uri", + "locationName":"environmentName" + }, + "nextToken":{ + "shape":"String", + "documentation":"

    The token to request the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListCodegenJobsLimit", + "documentation":"

    The maximum number of jobs to retrieve.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListCodegenJobsResponse":{ + "type":"structure", + "required":["entities"], + "members":{ + "entities":{ + "shape":"CodegenJobSummaryList", + "documentation":"

    The list of code generation jobs for the Amplify app.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    The pagination token that's included if more results are available.

    " + } + } + }, "ListComponentsLimit":{ "type":"integer", "max":100, @@ -2362,6 +2888,32 @@ }, "payload":"body" }, + "ReactStartCodegenJobData":{ + "type":"structure", + "members":{ + "module":{ + "shape":"JSModule", + "documentation":"

    The JavaScript module type.

    " + }, + "target":{ + "shape":"JSTarget", + "documentation":"

    The ECMAScript specification to use.

    " + }, + "script":{ + "shape":"JSScript", + "documentation":"

    The file type to use for a JavaScript project.

    " + }, + "renderTypeDeclarations":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the code generation job should render type declaration files.

    " + }, + "inlineSourceMap":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the code generation job should render inline source maps.

    " + } + }, + "documentation":"

    Describes the code generation job configuration for a React project.

    " + }, "RefreshTokenRequest":{ "type":"structure", "required":[ @@ -2414,6 +2966,10 @@ } } }, + "RelatedModelFieldsList":{ + "type":"list", + "member":{"shape":"String"} + }, "ResourceConflictException":{ "type":"structure", "members":{ @@ -2519,6 +3075,77 @@ "type":"list", "member":{"shape":"SortProperty"} }, + "StartCodegenJobData":{ + "type":"structure", + "required":["renderConfig"], + "members":{ + "renderConfig":{ + "shape":"CodegenJobRenderConfig", + "documentation":"

    The code generation configuration for the codegen job.

    " + }, + "genericDataSchema":{ + "shape":"CodegenJobGenericDataSchema", + "documentation":"

    The data schema to use for a code generation job.

    " + }, + "autoGenerateForms":{ + "shape":"Boolean", + "documentation":"

    Specifies whether to autogenerate forms in the code generation job.

    " + }, + "features":{ + "shape":"CodegenFeatureFlags", + "documentation":"

    The feature flags for a code generation job.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    One or more key-value pairs to use when tagging the code generation job data.

    " + } + }, + "documentation":"

    The code generation job resource configuration.

    " + }, + "StartCodegenJobRequest":{ + "type":"structure", + "required":[ + "appId", + "environmentName", + "codegenJobToCreate" + ], + "members":{ + "appId":{ + "shape":"AppId", + "documentation":"

    The unique ID for the Amplify app.

    ", + "location":"uri", + "locationName":"appId" + }, + "environmentName":{ + "shape":"String", + "documentation":"

    The name of the backend environment that is a part of the Amplify app.

    ", + "location":"uri", + "locationName":"environmentName" + }, + "clientToken":{ + "shape":"String", + "documentation":"

    The idempotency token used to ensure that the code generation job request completes only once.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "codegenJobToCreate":{ + "shape":"StartCodegenJobData", + "documentation":"

    The code generation job resource configuration.

    " + } + }, + "payload":"codegenJobToCreate" + }, + "StartCodegenJobResponse":{ + "type":"structure", + "members":{ + "entity":{ + "shape":"CodegenJob", + "documentation":"

    The code generation job for a UI component that is associated with an Amplify app.

    " + } + }, + "payload":"entity" + }, "StorageAccessLevel":{ "type":"string", "enum":[ @@ -2675,6 +3302,18 @@ "type":"list", "member":{"shape":"ThemeValues"} }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, "TokenProviders":{ "type":"string", "enum":["figma"] diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 115c0bedfa88..de614feb0c9c 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 9f3950c1a140..44e3f0b6f820 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 3df961f8d103..f07b85435509 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index e9ad6a64bf9f..7b46dae9d20c 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index e8ba09c0d74a..21bfc3b9c6d3 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index a87b386be4be..3136b4cc6dd9 100644 --- a/services/appflow/pom.xml +++ 
b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appflow/src/main/resources/codegen-resources/service-2.json b/services/appflow/src/main/resources/codegen-resources/service-2.json index e48d27ecd73c..63ccce2dd6a8 100644 --- a/services/appflow/src/main/resources/codegen-resources/service-2.json +++ b/services/appflow/src/main/resources/codegen-resources/service-2.json @@ -265,6 +265,22 @@ ], "documentation":"

    Registers a new custom connector with your Amazon Web Services account. Before you can register the connector, you must deploy the associated AWS Lambda function in your account.

    " }, + "ResetConnectorMetadataCache":{ + "name":"ResetConnectorMetadataCache", + "http":{ + "method":"POST", + "requestUri":"/reset-connector-metadata-cache" + }, + "input":{"shape":"ResetConnectorMetadataCacheRequest"}, + "output":{"shape":"ResetConnectorMetadataCacheResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Resets metadata about your connector entities that Amazon AppFlow stored in its cache. Use this action when you want Amazon AppFlow to return the latest information about the data that you have in a source application.

    Amazon AppFlow returns metadata about your entities when you use the ListConnectorEntities or DescribeConnectorEntities actions. Following these actions, Amazon AppFlow caches the metadata to reduce the number of API requests that it must send to the source application. Amazon AppFlow automatically resets the cache once every hour, but you can use this action when you want to get the latest metadata right away.

    " + }, "StartFlow":{ "name":"StartFlow", "http":{ @@ -4067,6 +4083,36 @@ }, "documentation":"

    Describes the status of an attempt from Amazon AppFlow to register a resource.

    When you run a flow that you've configured to use a metadata catalog, Amazon AppFlow registers a metadata table and data partitions with that catalog. This operation provides the status of that registration attempt. The operation also indicates how many related resources Amazon AppFlow created or updated.

    " }, + "ResetConnectorMetadataCacheRequest":{ + "type":"structure", + "members":{ + "connectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

    The name of the connector profile that you want to reset cached metadata for.

    You can omit this parameter if you're resetting the cache for any of the following connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, Amazon S3, or Upsolver. If you're resetting the cache for any other connector, you must include this parameter in your request.

    " + }, + "connectorType":{ + "shape":"ConnectorType", + "documentation":"

    The type of connector to reset cached metadata for.

    You must include this parameter in your request if you're resetting the cache for any of the following connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, Amazon S3, or Upsolver. If you're resetting the cache for any other connector, you can omit this parameter from your request.

    " + }, + "connectorEntityName":{ + "shape":"EntityName", + "documentation":"

    Use this parameter if you want to reset cached metadata about the details for an individual entity.

    If you don't include this parameter in your request, Amazon AppFlow only resets cached metadata about entity names, not entity details.

    " + }, + "entitiesPath":{ + "shape":"EntitiesPath", + "documentation":"

    Use this parameter only if you’re resetting the cached metadata about a nested entity. Only some connectors support nested entities. A nested entity is one that has another entity as a parent. To use this parameter, specify the name of the parent entity.

    To look up the parent-child relationship of entities, you can send a ListConnectorEntities request that omits the entitiesPath parameter. Amazon AppFlow will return a list of top-level entities. For each one, it indicates whether the entity has nested entities. Then, in a subsequent ListConnectorEntities request, you can specify a parent entity name for the entitiesPath parameter. Amazon AppFlow will return a list of the child entities for that parent.

    " + }, + "apiVersion":{ + "shape":"ApiVersion", + "documentation":"

    The API version that you specified in the connector profile that you’re resetting cached metadata for. You must use this parameter only if the connector supports multiple API versions or if the connector type is CustomConnector.

    To look up how many versions a connector supports, use the DescribeConnectors action. In the response, find the value that Amazon AppFlow returns for the connectorVersion parameter.

    To look up the connector type, use the DescribeConnectorProfiles action. In the response, find the value that Amazon AppFlow returns for the connectorType parameter.

    To look up the API version that you specified in a connector profile, use the DescribeConnectorProfiles action.

    " + } + } + }, + "ResetConnectorMetadataCacheResponse":{ + "type":"structure", + "members":{ + } + }, "ResourceNotFoundException":{ "type":"structure", "members":{ diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index 9eea5f80cb49..fdf847b02523 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 709493c42662..13b33b6a79a3 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index d302e3827c8b..f4cda7771200 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 0e54eca6c2ca..cdc8abe125ae 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json index 9251fb9e4984..b745570fa9cb 100644 --- 
a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": 
"Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://discovery-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://discovery-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + 
"conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://discovery-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://discovery.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://discovery-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://discovery.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - 
"fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://discovery.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://discovery.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json index 7f10fcad8c95..ec4122fe2f9c 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://discovery-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-central-1.amazonaws.com" + "url": "https://discovery.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - 
"UseFIPS": true, - "Region": "eu-central-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.eu-central-1.api.aws" + "url": "https://discovery.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -47,48 +34,48 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.us-west-2.api.aws" + "url": "https://discovery.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "UseFIPS": true, - "Region": "us-west-2" + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.us-west-2.amazonaws.com" + "url": "https://discovery.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-west-2" + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.us-west-2.api.aws" + "url": "https://discovery.us-east-1.amazonaws.com" } }, "params": { - 
"UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -99,282 +86,274 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-2.api.aws" + "url": "https://discovery-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "eu-west-2" + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-2.amazonaws.com" + "url": "https://discovery-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://discovery.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery.eu-west-2.amazonaws.com" + "url": "https://discovery.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS 
enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-1.api.aws" + "url": "https://discovery-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.eu-west-1.amazonaws.com" + "url": "https://discovery-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery.eu-west-1.api.aws" + "url": "https://discovery.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.eu-west-1.amazonaws.com" + "url": "https://discovery.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery-fips.ap-northeast-1.api.aws" + "url": "https://discovery-fips.us-gov-east-1.api.aws" } }, 
"params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.ap-northeast-1.amazonaws.com" + "url": "https://discovery-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://discovery.ap-northeast-1.api.aws" + "url": "https://discovery.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.ap-northeast-1.amazonaws.com" + "url": "https://discovery.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://discovery-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": 
"ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.ap-southeast-2.amazonaws.com" + "url": "https://discovery-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://discovery.ap-southeast-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.ap-southeast-2.amazonaws.com" + "url": "https://discovery.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://discovery-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region 
us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery-fips.us-east-1.amazonaws.com" + "url": "https://discovery-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://discovery.us-east-1.api.aws" + "url": "https://discovery.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://discovery.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, 
"Endpoint": "https://example.com" } }, @@ -384,9 +363,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -396,11 +375,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json b/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json index 322a66a3ec95..3d97897979a5 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/service-2.json @@ -131,7 +131,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Lists agents or connectors as specified by ID or other filters. All agents/connectors associated with your user account can be listed if you call DescribeAgents as is without passing any parameters.

    " + "documentation":"

    Lists agents or collectors as specified by ID or other filters. All agents/collectors associated with your user can be listed if you call DescribeAgents as is without passing any parameters.

    " }, "DescribeConfigurations":{ "name":"DescribeConfigurations", @@ -167,7 +167,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Lists exports as specified by ID. All continuous exports associated with your user account can be listed if you call DescribeContinuousExports as is without passing any parameters.

    " + "documentation":"

    Lists exports as specified by ID. All continuous exports associated with your user can be listed if you call DescribeContinuousExports as is without passing any parameters.

    " }, "DescribeExportConfigurations":{ "name":"DescribeExportConfigurations", @@ -238,7 +238,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters.

    There are three valid tag filter names:

    • tagKey

    • tagValue

    • configurationId

    Also, all configuration items associated with your user account that have tags can be listed if you call DescribeTags as is without passing any parameters.

    " + "documentation":"

    Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters.

    There are three valid tag filter names:

    • tagKey

    • tagValue

    • configurationId

    Also, all configuration items associated with your user that have tags can be listed if you call DescribeTags as is without passing any parameters.

    " }, "DisassociateConfigurationItemsFromApplication":{ "name":"DisassociateConfigurationItemsFromApplication", @@ -362,7 +362,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Instructs the specified agents or connectors to start collecting data.

    " + "documentation":"

    Instructs the specified agents to start collecting data.

    " }, "StartExportTask":{ "name":"StartExportTask", @@ -380,7 +380,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Begins the export of discovered data to an S3 bucket.

    If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports.

    If you do not include an agentIds filter, summary data is exported that includes both Amazon Web Services Agentless Discovery Connector data and summary data from Amazon Web Services Discovery Agents. Export of summary data is limited to two exports per day.

    " + "documentation":"

    Begins the export of a discovered data report to an Amazon S3 bucket managed by Amazon Web Services.

    Exports might provide an estimate of fees and savings based on certain information that you provide. Fee estimates do not include any taxes that might apply. Your actual fees and savings depend on a variety of factors, including your actual usage of Amazon Web Services services, which might vary from the estimates provided in this report.

    If you do not specify preferences or agentIds in the filter, a summary of all servers, applications, tags, and performance is generated. This data is an aggregation of all server data collected through on-premises tooling, file import, application grouping and applying tags.

    If you specify agentIds in a filter, the task exports up to 72 hours of detailed data collected by the identified Application Discovery Agent, including network, process, and performance details. A time range for exported agent data may be set by using startTime and endTime. Export of detailed agent data is limited to five concurrently running exports. Export of detailed agent data is limited to two exports per day.

    If you enable ec2RecommendationsPreferences in preferences , an Amazon EC2 instance matching the characteristics of each server in Application Discovery Service is generated. Changing the attributes of the ec2RecommendationsPreferences changes the criteria of the recommendation.

    " }, "StartImportTask":{ "name":"StartImportTask", @@ -398,7 +398,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Starts an import task, which allows you to import details of your on-premises environment directly into Amazon Web Services Migration Hub without having to use the Application Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

    To start an import request, do this:

    1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

    2. Fill out the template with your server and application data.

    3. Upload your import file to an Amazon S3 bucket, and make a note of it's Object URL. Your import file must be in the CSV format.

    4. Use the console or the StartImportTask command with the Amazon Web Services CLI or one of the Amazon Web Services SDKs to import the records from your file.

    For more information, including step-by-step procedures, see Migration Hub Import in the Amazon Web Services Application Discovery Service User Guide.

    There are limits to the number of import tasks you can create (and delete) in an Amazon Web Services account. For more information, see Amazon Web Services Application Discovery Service Limits in the Amazon Web Services Application Discovery Service User Guide.

    " + "documentation":"

    Starts an import task, which allows you to import details of your on-premises environment directly into Amazon Web Services Migration Hub without having to use the Amazon Web Services Application Discovery Service (Application Discovery Service) tools such as the Amazon Web Services Application Discovery Service Agentless Collector or Application Discovery Agent. This gives you the option to perform migration assessment and planning directly from your imported data, including the ability to group your devices as applications and track their migration status.

    To start an import request, do this:

    1. Download the specially formatted comma separated value (CSV) import template, which you can find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv.

    2. Fill out the template with your server and application data.

    3. Upload your import file to an Amazon S3 bucket, and make a note of its Object URL. Your import file must be in the CSV format.

    4. Use the console or the StartImportTask command with the Amazon Web Services CLI or one of the Amazon Web Services SDKs to import the records from your file.

    For more information, including step-by-step procedures, see Migration Hub Import in the Amazon Web Services Application Discovery Service User Guide.

    There are limits to the number of import tasks you can create (and delete) in an Amazon Web Services account. For more information, see Amazon Web Services Application Discovery Service Limits in the Amazon Web Services Application Discovery Service User Guide.

    " }, "StopContinuousExport":{ "name":"StopContinuousExport", @@ -435,7 +435,7 @@ {"shape":"ServerInternalErrorException"}, {"shape":"HomeRegionNotSetException"} ], - "documentation":"

    Instructs the specified agents or connectors to stop collecting data.

    " + "documentation":"

    Instructs the specified agents to stop collecting data.

    " }, "UpdateApplication":{ "name":"UpdateApplication", @@ -461,18 +461,18 @@ "members":{ "agentId":{ "shape":"String", - "documentation":"

    The agent/connector ID.

    " + "documentation":"

    The agent ID.

    " }, "operationSucceeded":{ "shape":"Boolean", - "documentation":"

    Information about the status of the StartDataCollection and StopDataCollection operations. The system has recorded the data collection operation. The agent/connector receives this command the next time it polls for a new command.

    " + "documentation":"

    Information about the status of the StartDataCollection and StopDataCollection operations. The system has recorded the data collection operation. The agent receives this command the next time it polls for a new command.

    " }, "description":{ "shape":"String", "documentation":"

    A description of the operation performed.

    " } }, - "documentation":"

    Information about agents or connectors that were instructed to start collecting data. Information includes the agent/connector ID, a description of the operation, and whether the agent/connector configuration was updated.

    " + "documentation":"

    Information about agents that were instructed to start collecting data. Information includes the agent ID, a description of the operation, and whether the agent configuration was updated.

    " }, "AgentConfigurationStatusList":{ "type":"list", @@ -493,15 +493,15 @@ "members":{ "agentId":{ "shape":"AgentId", - "documentation":"

    The agent or connector ID.

    " + "documentation":"

    The agent or collector ID.

    " }, "hostName":{ "shape":"String", - "documentation":"

    The name of the host where the agent or connector resides. The host can be a server or virtual machine.

    " + "documentation":"

    The name of the host where the agent or collector resides. The host can be a server or virtual machine.

    " }, "agentNetworkInfoList":{ "shape":"AgentNetworkInfoList", - "documentation":"

    Network details about the host where the agent or connector resides.

    " + "documentation":"

    Network details about the host where the agent or collector resides.

    " }, "connectorId":{ "shape":"String", @@ -509,19 +509,19 @@ }, "version":{ "shape":"String", - "documentation":"

    The agent or connector version.

    " + "documentation":"

    The agent or collector version.

    " }, "health":{ "shape":"AgentStatus", - "documentation":"

    The health of the agent or connector.

    " + "documentation":"

    The health of the agent.

    " }, "lastHealthPingTime":{ "shape":"String", - "documentation":"

    Time since agent or connector health was reported.

    " + "documentation":"

    Time since agent health was reported.

    " }, "collectionStatus":{ "shape":"String", - "documentation":"

    Status of the collection process for an agent or connector.

    " + "documentation":"

    Status of the collection process for an agent.

    " }, "agentType":{ "shape":"String", @@ -532,21 +532,21 @@ "documentation":"

    Agent's first registration timestamp in UTC.

    " } }, - "documentation":"

    Information about agents or connectors associated with the user’s Amazon Web Services account. Information includes agent or connector IDs, IP addresses, media access control (MAC) addresses, agent or connector health, hostname where the agent or connector resides, and agent version for each agent.

    " + "documentation":"

    Information about agents associated with the user’s Amazon Web Services account. Information includes agent IDs, IP addresses, media access control (MAC) addresses, agent or collector status, hostname where the agent resides, and agent version for each agent.

    " }, "AgentNetworkInfo":{ "type":"structure", "members":{ "ipAddress":{ "shape":"String", - "documentation":"

    The IP address for the host where the agent/connector resides.

    " + "documentation":"

    The IP address for the host where the agent/collector resides.

    " }, "macAddress":{ "shape":"String", - "documentation":"

    The MAC address for the host where the agent/connector resides.

    " + "documentation":"

    The MAC address for the host where the agent/collector resides.

    " } }, - "documentation":"

    Network details about the host where the agent/connector resides.

    " + "documentation":"

    Network details about the host where the agent/collector resides.

    " }, "AgentNetworkInfoList":{ "type":"list", @@ -613,7 +613,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

    The Amazon Web Services user account does not have permission to perform the action. Check the IAM policy associated with this account.

    ", + "documentation":"

    The user does not have permission to perform the action. Check the IAM policy associated with this user.

    ", "exception":true }, "BatchDeleteImportDataError":{ @@ -765,7 +765,7 @@ }, "statusDetail":{ "shape":"StringMax255", - "documentation":"

    Contains information about any errors that have occurred. This data type can have the following values:

    • ACCESS_DENIED - You don’t have permission to start Data Exploration in Amazon Athena. Contact your Amazon Web Services administrator for help. For more information, see Setting Up Amazon Web Services Application Discovery Service in the Application Discovery Service User Guide.

    • DELIVERY_STREAM_LIMIT_FAILURE - You reached the limit for Amazon Kinesis Data Firehose delivery streams. Reduce the number of streams or request a limit increase and try again. For more information, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

    • FIREHOSE_ROLE_MISSING - The Data Exploration feature is in an error state because your IAM User is missing the AWSApplicationDiscoveryServiceFirehose role. Turn on Data Exploration in Amazon Athena and try again. For more information, see Step 3: Provide Application Discovery Service Access to Non-Administrator Users by Attaching Policies in the Application Discovery Service User Guide.

    • FIREHOSE_STREAM_DOES_NOT_EXIST - The Data Exploration feature is in an error state because your IAM User is missing one or more of the Kinesis data delivery streams.

    • INTERNAL_FAILURE - The Data Exploration feature is in an error state because of an internal failure. Try again later. If this problem persists, contact Amazon Web Services Support.

    • LAKE_FORMATION_ACCESS_DENIED - You don't have sufficient lake formation permissions to start continuous export. For more information, see Upgrading Amazon Web Services Glue Data Permissions to the Amazon Web Services Lake Formation Model in the Amazon Web Services Lake Formation Developer Guide.

      You can use one of the following two ways to resolve this issue.

      1. If you don’t want to use the Lake Formation permission model, you can change the default Data Catalog settings to use only Amazon Web Services Identity and Access Management (IAM) access control for new databases. For more information, see Change Data Catalog Settings in the Lake Formation Developer Guide.

      2. You can give the service-linked IAM roles AWSServiceRoleForApplicationDiscoveryServiceContinuousExport and AWSApplicationDiscoveryServiceFirehose the required Lake Formation permissions. For more information, see Granting Database Permissions in the Lake Formation Developer Guide.

        1. AWSServiceRoleForApplicationDiscoveryServiceContinuousExport - Grant database creator permissions, which gives the role database creation ability and implicit permissions for any created tables. For more information, see Implicit Lake Formation Permissions in the Lake Formation Developer Guide.

        2. AWSApplicationDiscoveryServiceFirehose - Grant describe permissions for all tables in the database.

    • S3_BUCKET_LIMIT_FAILURE - You reached the limit for Amazon S3 buckets. Reduce the number of S3 buckets or request a limit increase and try again. For more information, see Bucket Restrictions and Limitations in the Amazon Simple Storage Service Developer Guide.

    • S3_NOT_SIGNED_UP - Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3.

    " + "documentation":"

    Contains information about any errors that have occurred. This data type can have the following values:

    • ACCESS_DENIED - You don’t have permission to start Data Exploration in Amazon Athena. Contact your Amazon Web Services administrator for help. For more information, see Setting Up Amazon Web Services Application Discovery Service in the Application Discovery Service User Guide.

    • DELIVERY_STREAM_LIMIT_FAILURE - You reached the limit for Amazon Kinesis Data Firehose delivery streams. Reduce the number of streams or request a limit increase and try again. For more information, see Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

    • FIREHOSE_ROLE_MISSING - The Data Exploration feature is in an error state because your user is missing the AWSApplicationDiscoveryServiceFirehose role. Turn on Data Exploration in Amazon Athena and try again. For more information, see Creating the AWSApplicationDiscoveryServiceFirehose Role in the Application Discovery Service User Guide.

    • FIREHOSE_STREAM_DOES_NOT_EXIST - The Data Exploration feature is in an error state because your user is missing one or more of the Kinesis data delivery streams.

    • INTERNAL_FAILURE - The Data Exploration feature is in an error state because of an internal failure. Try again later. If this problem persists, contact Amazon Web Services Support.

    • LAKE_FORMATION_ACCESS_DENIED - You don't have sufficient lake formation permissions to start continuous export. For more information, see Upgrading Amazon Web Services Glue Data Permissions to the Amazon Web Services Lake Formation Model in the Amazon Web Services Lake Formation Developer Guide.

      You can use one of the following two ways to resolve this issue.

      1. If you don’t want to use the Lake Formation permission model, you can change the default Data Catalog settings to use only Amazon Web Services Identity and Access Management (IAM) access control for new databases. For more information, see Change Data Catalog Settings in the Lake Formation Developer Guide.

      2. You can give the service-linked IAM roles AWSServiceRoleForApplicationDiscoveryServiceContinuousExport and AWSApplicationDiscoveryServiceFirehose the required Lake Formation permissions. For more information, see Granting Database Permissions in the Lake Formation Developer Guide.

        1. AWSServiceRoleForApplicationDiscoveryServiceContinuousExport - Grant database creator permissions, which gives the role database creation ability and implicit permissions for any created tables. For more information, see Implicit Lake Formation Permissions in the Lake Formation Developer Guide.

        2. AWSApplicationDiscoveryServiceFirehose - Grant describe permissions for all tables in the database.

    • S3_BUCKET_LIMIT_FAILURE - You reached the limit for Amazon S3 buckets. Reduce the number of S3 buckets or request a limit increase and try again. For more information, see Bucket Restrictions and Limitations in the Amazon Simple Storage Service Developer Guide.

    • S3_NOT_SIGNED_UP - Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3.

    " }, "s3Bucket":{ "shape":"S3Bucket", @@ -910,14 +910,36 @@ "unknownAgentlessCollectors" ], "members":{ - "activeAgentlessCollectors":{"shape":"Integer"}, - "healthyAgentlessCollectors":{"shape":"Integer"}, - "denyListedAgentlessCollectors":{"shape":"Integer"}, - "shutdownAgentlessCollectors":{"shape":"Integer"}, - "unhealthyAgentlessCollectors":{"shape":"Integer"}, - "totalAgentlessCollectors":{"shape":"Integer"}, - "unknownAgentlessCollectors":{"shape":"Integer"} - } + "activeAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The number of active Agentless Collector collectors.

    " + }, + "healthyAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The number of healthy Agentless Collector collectors.

    " + }, + "denyListedAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The number of deny-listed Agentless Collector collectors.

    " + }, + "shutdownAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The number of Agentless Collector collectors with SHUTDOWN status.

    " + }, + "unhealthyAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The number of unhealthy Agentless Collector collectors.

    " + }, + "totalAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The total number of Agentless Collector collectors.

    " + }, + "unknownAgentlessCollectors":{ + "shape":"Integer", + "documentation":"

    The number of unknown Agentless Collector collectors.

    " + } + }, + "documentation":"

    The inventory data for installed Agentless Collector collectors.

    " }, "CustomerConnectorInfo":{ "type":"structure", @@ -1053,7 +1075,7 @@ "members":{ "agentIds":{ "shape":"AgentIds", - "documentation":"

    The agent or the Connector IDs for which you want information. If you specify no IDs, the system returns information about all agents/Connectors associated with your Amazon Web Services user account.

    " + "documentation":"

    The agent or the collector IDs for which you want information. If you specify no IDs, the system returns information about all agents/collectors associated with your user.

    " }, "filters":{ "shape":"Filters", @@ -1061,7 +1083,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

    The total number of agents/Connectors to return in a single page of output. The maximum value is 100.

    " + "documentation":"

    The total number of agents/collectors to return in a single page of output. The maximum value is 100.

    " }, "nextToken":{ "shape":"NextToken", @@ -1074,7 +1096,7 @@ "members":{ "agentsInfo":{ "shape":"AgentsInfo", - "documentation":"

    Lists agents or the Connector by ID or lists all agents/Connectors associated with your user account if you did not specify an agent/Connector ID. The output includes agent/Connector IDs, IP addresses, media access control (MAC) addresses, agent/Connector health, host name where the agent/Connector resides, and the version number of each agent/Connector.

    " + "documentation":"

    Lists agents or the collector by ID or lists all agents/collectors associated with your user, if you did not specify an agent/collector ID. The output includes agent/collector IDs, IP addresses, media access control (MAC) addresses, agent/collector health, host name where the agent/collector resides, and the version number of each agent/collector.

    " }, "nextToken":{ "shape":"NextToken", @@ -1302,6 +1324,50 @@ "members":{ } }, + "EC2InstanceType":{ + "type":"string", + "max":25, + "min":1, + "pattern":"[a-zA-Z0-9\\d\\.\\-]+" + }, + "Ec2RecommendationsExportPreferences":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"ExportEnabled", + "documentation":"

    If set to true, the export preference is set to Ec2RecommendationsExportPreferences.

    " + }, + "cpuPerformanceMetricBasis":{ + "shape":"UsageMetricBasis", + "documentation":"

    The recommended EC2 instance type that matches the CPU usage metric of server performance data.

    " + }, + "ramPerformanceMetricBasis":{ + "shape":"UsageMetricBasis", + "documentation":"

    The recommended EC2 instance type that matches the Memory usage metric of server performance data.

    " + }, + "tenancy":{ + "shape":"Tenancy", + "documentation":"

    The target tenancy to use for your recommended EC2 instances.

    " + }, + "excludedInstanceTypes":{ + "shape":"ExcludedInstanceTypes", + "documentation":"

    An array of instance types to exclude from recommendations.

    " + }, + "preferredRegion":{ + "shape":"UserPreferredRegion", + "documentation":"

    The target Amazon Web Services Region for the recommendations. You can use any of the Region codes available for the chosen service, as listed in Amazon Web Services service endpoints in the Amazon Web Services General Reference.

    " + }, + "reservedInstanceOptions":{ + "shape":"ReservedInstanceOptions", + "documentation":"

    The contract type for a reserved instance. If blank, we assume an On-Demand instance is preferred.

    " + } + }, + "documentation":"

    Indicates that the exported data must include EC2 instance type matches for on-premises servers that are discovered through Amazon Web Services Application Discovery Service.

    " + }, + "ExcludedInstanceTypes":{ + "type":"list", + "member":{"shape":"EC2InstanceType"} + }, "ExportConfigurationsResponse":{ "type":"structure", "members":{ @@ -1313,15 +1379,13 @@ }, "ExportDataFormat":{ "type":"string", - "enum":[ - "CSV", - "GRAPHML" - ] + "enum":["CSV"] }, "ExportDataFormats":{ "type":"list", "member":{"shape":"ExportDataFormat"} }, + "ExportEnabled":{"type":"boolean"}, "ExportFilter":{ "type":"structure", "required":[ @@ -1397,6 +1461,17 @@ }, "documentation":"

    Information regarding the export status of discovered data. The value is an array of objects.

    " }, + "ExportPreferences":{ + "type":"structure", + "members":{ + "ec2RecommendationsPreferences":{ + "shape":"Ec2RecommendationsExportPreferences", + "documentation":"

    If enabled, exported data includes EC2 instance type matches for on-premises servers discovered through Amazon Web Services Application Discovery Service.

    " + } + }, + "documentation":"

    Indicates the type of data that is being exported. Only one ExportPreferences can be enabled for a StartExportTask action.

    ", + "union":true + }, "ExportRequestTime":{"type":"timestamp"}, "ExportStatus":{ "type":"string", @@ -1488,7 +1563,10 @@ "shape":"CustomerMeCollectorInfo", "documentation":"

    Details about Migration Evaluator collectors, including collector status and health.

    " }, - "agentlessCollectorSummary":{"shape":"CustomerAgentlessCollectorInfo"} + "agentlessCollectorSummary":{ + "shape":"CustomerAgentlessCollectorInfo", + "documentation":"

    Details about Agentless Collector collectors, including status.

    " + } } }, "HomeRegionNotSetException":{ @@ -1496,7 +1574,7 @@ "members":{ "message":{"shape":"Message"} }, - "documentation":"

    The home region is not set. Set the home region to continue.

    ", + "documentation":"

    The home Region is not set. Set the home Region to continue.

    ", "exception":true }, "ImportStatus":{ @@ -1765,6 +1843,13 @@ "member":{"shape":"NeighborConnectionDetail"} }, "NextToken":{"type":"string"}, + "OfferingClass":{ + "type":"string", + "enum":[ + "STANDARD", + "CONVERTIBLE" + ] + }, "OperationNotPermittedException":{ "type":"structure", "members":{ @@ -1797,6 +1882,37 @@ "type":"list", "member":{"shape":"OrderByElement"} }, + "PurchasingOption":{ + "type":"string", + "enum":[ + "ALL_UPFRONT", + "PARTIAL_UPFRONT", + "NO_UPFRONT" + ] + }, + "ReservedInstanceOptions":{ + "type":"structure", + "required":[ + "purchasingOption", + "offeringClass", + "termLength" + ], + "members":{ + "purchasingOption":{ + "shape":"PurchasingOption", + "documentation":"

    The payment plan to use for your Reserved Instance.

    " + }, + "offeringClass":{ + "shape":"OfferingClass", + "documentation":"

    The flexibility to change the instance types needed for your Reserved Instance.

    " + }, + "termLength":{ + "shape":"TermLength", + "documentation":"

    The preferred duration of the Reserved Instance term.

    " + } + }, + "documentation":"

    Used to provide Reserved Instance preferences for the recommendation.

    " + }, "ResourceInUseException":{ "type":"structure", "members":{ @@ -1865,7 +1981,7 @@ "members":{ "agentIds":{ "shape":"AgentIds", - "documentation":"

    The IDs of the agents or connectors from which to start collecting data. If you send a request to an agent/connector ID that you do not have permission to contact, according to your Amazon Web Services account, the service does not throw an exception. Instead, it returns the error in the Description field. If you send a request to multiple agents/connectors and you do not have permission to contact some of those agents/connectors, the system does not throw an exception. Instead, the system shows Failed in the Description field.

    " + "documentation":"

    The IDs of the agents from which to start collecting data. If you send a request to an agent ID that you do not have permission to contact, according to your Amazon Web Services account, the service does not throw an exception. Instead, it returns the error in the Description field. If you send a request to multiple agents and you do not have permission to contact some of those agents, the system does not throw an exception. Instead, the system shows Failed in the Description field.

    " } } }, @@ -1874,7 +1990,7 @@ "members":{ "agentsConfigurationStatus":{ "shape":"AgentConfigurationStatusList", - "documentation":"

    Information about agents or the connector that were instructed to start collecting data. Information includes the agent/connector ID, a description of the operation performed, and whether the agent/connector configuration was updated.

    " + "documentation":"

    Information about agents that were instructed to start collecting data. Information includes the agent ID, a description of the operation performed, and whether the agent configuration was updated.

    " } } }, @@ -1887,7 +2003,7 @@ }, "filters":{ "shape":"ExportFilters", - "documentation":"

    If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and exported data includes both Agentless Discovery Connector data and summary data from Application Discovery agents.

    " + "documentation":"

    If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and exported data includes both Amazon Web Services Application Discovery Service Agentless Collector collectors data and summary data from Application Discovery Agent agents.

    " }, "startTime":{ "shape":"TimeStamp", @@ -1896,6 +2012,10 @@ "endTime":{ "shape":"TimeStamp", "documentation":"

    The end timestamp for exported data from the single Application Discovery Agent selected in the filters. If no value is specified, exported data includes the most recent data collected by the agent.

    " + }, + "preferences":{ + "shape":"ExportPreferences", + "documentation":"

    Indicates the type of data that needs to be exported. Only one ExportPreferences can be enabled at any time.

    " } } }, @@ -1968,7 +2088,7 @@ "members":{ "agentIds":{ "shape":"AgentIds", - "documentation":"

    The IDs of the agents or connectors from which to stop collecting data.

    " + "documentation":"

    The IDs of the agents from which to stop collecting data.

    " } } }, @@ -1977,7 +2097,7 @@ "members":{ "agentsConfigurationStatus":{ "shape":"AgentConfigurationStatusList", - "documentation":"

    Information about the agents or connector that were instructed to stop collecting data. Information includes the agent/connector ID, a description of the operation performed, and whether the agent/connector configuration was updated.

    " + "documentation":"

    Information about the agents that were instructed to stop collecting data. Information includes the agent ID, a description of the operation performed, and whether the agent configuration was updated.

    " } } }, @@ -2038,6 +2158,20 @@ "member":{"shape":"Tag"} }, "TagValue":{"type":"string"}, + "Tenancy":{ + "type":"string", + "enum":[ + "DEDICATED", + "SHARED" + ] + }, + "TermLength":{ + "type":"string", + "enum":[ + "ONE_YEAR", + "THREE_YEAR" + ] + }, "TimeStamp":{"type":"timestamp"}, "ToDeleteIdentifierList":{ "type":"list", @@ -2068,6 +2202,35 @@ "members":{ } }, + "UsageMetricBasis":{ + "type":"structure", + "members":{ + "name":{ + "shape":"UsageMetricBasisName", + "documentation":"

    A utilization metric that is used by the recommendations.

    " + }, + "percentageAdjust":{ + "shape":"UsageMetricPercentageAdjust", + "documentation":"

    Specifies the percentage of the specified utilization metric that is used by the recommendations.

    " + } + }, + "documentation":"

    Specifies the performance metrics to use for the server that is used for recommendations.

    " + }, + "UsageMetricBasisName":{ + "type":"string", + "pattern":"^(p(\\d{1,2}|100)|AVG|SPEC|MAX)$" + }, + "UsageMetricPercentageAdjust":{ + "type":"double", + "max":100.0, + "min":0.0 + }, + "UserPreferredRegion":{ + "type":"string", + "max":30, + "min":1, + "pattern":"[a-z]{2}-[a-z\\-]+-[0-9]+" + }, "orderString":{ "type":"string", "enum":[ @@ -2076,5 +2239,5 @@ ] } }, - "documentation":"Amazon Web Services Application Discovery Service

    Amazon Web Services Application Discovery Service helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the Amazon Web Services Application Discovery Service FAQ. Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers:

    • Agentless discovery is recommended for environments that use VMware vCenter Server. This mode doesn't require you to install an agent on each host. It does not work in non-VMware environments.

      • Agentless discovery gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment.

      • Agentless discovery doesn't collect information about network dependencies, only agent-based discovery collects that information.

    • Agent-based discovery collects a richer set of data than agentless discovery by using the Amazon Web Services Application Discovery Agent, which you install on one or more hosts in your data center.

      • The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies.

      • The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the cloud.

    • Amazon Web Services Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Migration Hub without using the discovery connector or discovery agent.

      • Third-party application discovery tools can query Amazon Web Services Application Discovery Service, and they can write to the Application Discovery Service database using the public API.

      • In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations.

    Recommendations

    We recommend that you use agent-based discovery for non-VMware environments, and whenever you want to collect information about network dependencies. You can run agent-based and agentless discovery simultaneously. Use agentless discovery to complete the initial infrastructure assessment quickly, and then install agents on select hosts to collect additional information.

    Working With This Guide

    This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

    • Remember that you must set your Migration Hub home region before you call any of these APIs.

    • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned.

    • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region.

    • Although it is unlikely, the Migration Hub home region could change. If you call APIs outside the home region, an InvalidInputException is returned.

    • You must call GetHomeRegion to obtain the latest Migration Hub home region.

    This guide is intended for use with the Amazon Web Services Application Discovery Service User Guide.

    All data is handled according to the Amazon Web Services Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

    " + "documentation":"Amazon Web Services Application Discovery Service

    Amazon Web Services Application Discovery Service (Application Discovery Service) helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the Amazon Web Services Application Discovery Service FAQ.

    Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers:

    • Agentless discovery using Amazon Web Services Application Discovery Service Agentless Collector (Agentless Collector), which doesn't require you to install an agent on each host.

      • Agentless Collector gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment.

      • Agentless Collector doesn't collect information about network dependencies, only agent-based discovery collects that information.

    • Agent-based discovery using the Amazon Web Services Application Discovery Agent (Application Discovery Agent), which you install on one or more hosts in your data center, collects a richer set of data than agentless discovery.

      • The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies.

      • The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the Amazon Web Services cloud. For more information, see Amazon Web Services Application Discovery Agent.

    • Amazon Web Services Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Amazon Web Services Migration Hub (Migration Hub) without using Agentless Collector or Application Discovery Agent.

      • Third-party application discovery tools can query Amazon Web Services Application Discovery Service, and they can write to the Application Discovery Service database using the public API.

      • In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations.

    Working With This Guide

    This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

    • Remember that you must set your Migration Hub home Region before you call any of these APIs.

    • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home Region, or a HomeRegionNotSetException error is returned.

    • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home Region.

    • Although it is unlikely, the Migration Hub home Region could change. If you call APIs outside the home Region, an InvalidInputException is returned.

    • You must call GetHomeRegion to obtain the latest Migration Hub home Region.

    This guide is intended for use with the Amazon Web Services Application Discovery Service User Guide.

    All data is handled according to the Amazon Web Services Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

    " } diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 4ba554b738ad..98d3672476d2 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index 63e4ae86cc5c..c7d4ba7455ec 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index 6a10b63908b5..3bf4bb4e1131 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 3caf1138ba00..7a0780f2b34a 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index 06dd2a734261..cffd954e11ef 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT appsync diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index 156011e34aef..5f9ac302d170 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/athena/pom.xml b/services/athena/pom.xml index 
c02b8e393236..547eb647306e 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index 473b8ca2f148..d472a45b51e9 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json b/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json index 7b3557950f12..6b6545622735 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/auditmanager/src/main/resources/codegen-resources/endpoint-tests.json @@ -9,8 +9,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + 
"UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true 
} }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -295,8 +295,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +319,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -321,8 +343,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -334,8 +367,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not 
support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -347,8 +391,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,8 +404,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -373,8 +417,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -385,8 +429,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -397,10 +441,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/auditmanager/src/main/resources/codegen-resources/service-2.json b/services/auditmanager/src/main/resources/codegen-resources/service-2.json index 7bef4e5f657c..8420122febbb 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/auditmanager/src/main/resources/codegen-resources/service-2.json @@ -104,9 +104,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

    Uploads one or more pieces of evidence to a control in an Audit Manager assessment. You can upload manual evidence from any Amazon Simple Storage Service (Amazon S3) bucket by specifying the S3 URI of the evidence.

    You must upload manual evidence to your S3 bucket before you can upload it to your assessment. For instructions, see CreateBucket and PutObject in the Amazon Simple Storage Service API Reference.

    The following restrictions apply to this action:

    • Maximum size of an individual evidence file: 100 MB

    • Number of daily manual evidence uploads per control: 100

    • Supported file formats: See Supported file types for manual evidence in the Audit Manager User Guide

    For more information about Audit Manager service restrictions, see Quotas and restrictions for Audit Manager.

    " + "documentation":"

    Adds one or more pieces of evidence to a control in an Audit Manager assessment.

    You can import manual evidence from any S3 bucket by specifying the S3 URI of the object. You can also upload a file from your browser, or enter plain text in response to a risk assessment question.

    The following restrictions apply to this action:

    • manualEvidence can be only one of the following: evidenceFileName, s3ResourcePath, or textResponse

    • Maximum size of an individual evidence file: 100 MB

    • Number of daily manual evidence uploads per control: 100

    • Supported file formats: See Supported file types for manual evidence in the Audit Manager User Guide

    For more information about Audit Manager service restrictions, see Quotas and restrictions for Audit Manager.

    " }, "CreateAssessment":{ "name":"CreateAssessment", @@ -253,7 +254,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes a custom control in Audit Manager.

    " + "documentation":"

    Deletes a custom control in Audit Manager.

    When you invoke this operation, the custom control is deleted from any frameworks or assessments that it’s currently part of. As a result, Audit Manager will stop collecting evidence for that custom control in all of your assessments. This includes assessments that you previously created before you deleted the custom control.

    " }, "DeregisterAccount":{ "name":"DeregisterAccount", @@ -314,7 +315,7 @@ "errors":[ {"shape":"InternalServerException"} ], - "documentation":"

    Returns the registration status of an account in Audit Manager.

    " + "documentation":"

    Gets the registration status of an account in Audit Manager.

    " }, "GetAssessment":{ "name":"GetAssessment", @@ -330,7 +331,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns an assessment from Audit Manager.

    " + "documentation":"

    Gets information about a specified assessment.

    " }, "GetAssessmentFramework":{ "name":"GetAssessmentFramework", @@ -346,7 +347,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns a framework from Audit Manager.

    " + "documentation":"

    Gets information about a specified framework.

    " }, "GetAssessmentReportUrl":{ "name":"GetAssessmentReportUrl", @@ -362,7 +363,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Returns the URL of an assessment report in Audit Manager.

    " + "documentation":"

    Gets the URL of an assessment report in Audit Manager.

    " }, "GetChangeLogs":{ "name":"GetChangeLogs", @@ -378,7 +379,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns a list of changelogs from Audit Manager.

    " + "documentation":"

    Gets a list of changelogs from Audit Manager.

    " }, "GetControl":{ "name":"GetControl", @@ -394,7 +395,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns a control from Audit Manager.

    " + "documentation":"

    Gets information about a specified control.

    " }, "GetDelegations":{ "name":"GetDelegations", @@ -409,7 +410,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns a list of delegations from an audit owner to a delegate.

    " + "documentation":"

    Gets a list of delegations from an audit owner to a delegate.

    " }, "GetEvidence":{ "name":"GetEvidence", @@ -425,7 +426,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns evidence from Audit Manager.

    " + "documentation":"

    Gets information about a specified evidence item.

    " }, "GetEvidenceByEvidenceFolder":{ "name":"GetEvidenceByEvidenceFolder", @@ -441,7 +442,23 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns all evidence from a specified evidence folder in Audit Manager.

    " + "documentation":"

    Gets all evidence from a specified evidence folder in Audit Manager.

    " + }, + "GetEvidenceFileUploadUrl":{ + "name":"GetEvidenceFileUploadUrl", + "http":{ + "method":"GET", + "requestUri":"/evidenceFileUploadUrl" + }, + "input":{"shape":"GetEvidenceFileUploadUrlRequest"}, + "output":{"shape":"GetEvidenceFileUploadUrlResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a presigned Amazon S3 URL that can be used to upload a file as manual evidence. For instructions on how to use this operation, see Upload a file from your browser in the Audit Manager User Guide.

    The following restrictions apply to this operation:

    • Maximum size of an individual evidence file: 100 MB

    • Number of daily manual evidence uploads per control: 100

    • Supported file formats: See Supported file types for manual evidence in the Audit Manager User Guide

    For more information about Audit Manager service restrictions, see Quotas and restrictions for Audit Manager.

    " }, "GetEvidenceFolder":{ "name":"GetEvidenceFolder", @@ -457,7 +474,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns an evidence folder from the specified assessment in Audit Manager.

    " + "documentation":"

    Gets an evidence folder from a specified assessment in Audit Manager.

    " }, "GetEvidenceFoldersByAssessment":{ "name":"GetEvidenceFoldersByAssessment", @@ -473,7 +490,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns the evidence folders from a specified assessment in Audit Manager.

    " + "documentation":"

    Gets the evidence folders from a specified assessment in Audit Manager.

    " }, "GetEvidenceFoldersByAssessmentControl":{ "name":"GetEvidenceFoldersByAssessmentControl", @@ -489,7 +506,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns a list of evidence folders that are associated with a specified control in an Audit Manager assessment.

    " + "documentation":"

    Gets a list of evidence folders that are associated with a specified control in an Audit Manager assessment.

    " }, "GetInsights":{ "name":"GetInsights", @@ -535,7 +552,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Returns the name of the delegated Amazon Web Services administrator account for the organization.

    " + "documentation":"

    Gets the name of the delegated Amazon Web Services administrator account for a specified organization.

    " }, "GetServicesInScope":{ "name":"GetServicesInScope", @@ -550,7 +567,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope.

    " + "documentation":"

    Gets a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope.

    " }, "GetSettings":{ "name":"GetSettings", @@ -564,7 +581,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns the settings for the specified Amazon Web Services account.

    " + "documentation":"

    Gets the settings for a specified Amazon Web Services account.

    " }, "ListAssessmentControlInsightsByControlDomain":{ "name":"ListAssessmentControlInsightsByControlDomain", @@ -1651,7 +1668,7 @@ }, "destination":{ "shape":"S3Url", - "documentation":"

    The destination of the assessment report.

    " + "documentation":"

    The destination bucket where Audit Manager stores assessment reports.

    " } }, "documentation":"

    The location where Audit Manager saves assessment reports for the given assessment.

    " @@ -1994,7 +2011,7 @@ }, "type":{ "shape":"ControlType", - "documentation":"

    The type of control, such as a custom control or a standard control.

    " + "documentation":"

    Specifies whether the control is a standard control or a custom control.

    " }, "name":{ "shape":"ControlName", @@ -2195,7 +2212,7 @@ "sourceKeyword":{"shape":"SourceKeyword"}, "sourceFrequency":{ "shape":"SourceFrequency", - "documentation":"

    The frequency of evidence collection for the control mapping source.

    " + "documentation":"

    Specifies how often evidence is collected from the control mapping source.

    " }, "troubleshootingText":{ "shape":"TroubleshootingText", @@ -2504,7 +2521,7 @@ "sourceKeyword":{"shape":"SourceKeyword"}, "sourceFrequency":{ "shape":"SourceFrequency", - "documentation":"

    The frequency of evidence collection for the control mapping source.

    " + "documentation":"

    Specifies how often evidence is collected from the control mapping source.

    " }, "troubleshootingText":{ "shape":"TroubleshootingText", @@ -2598,6 +2615,20 @@ "min":1, "pattern":"^[a-zA-Z0-9\\s-_()\\[\\]]+$" }, + "DefaultExportDestination":{ + "type":"structure", + "members":{ + "destinationType":{ + "shape":"ExportDestinationType", + "documentation":"

    The destination type, such as Amazon S3.

    " + }, + "destination":{ + "shape":"S3Url", + "documentation":"

    The destination bucket where Audit Manager stores exported files.

    " + } + }, + "documentation":"

    The default S3 bucket where Audit Manager saves the files that you export from evidence finder.

    " + }, "Delegation":{ "type":"structure", "members":{ @@ -3055,6 +3086,10 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "ExportDestinationType":{ + "type":"string", + "enum":["S3"] + }, "Filename":{ "type":"string", "max":255, @@ -3078,11 +3113,11 @@ }, "type":{ "shape":"FrameworkType", - "documentation":"

    The framework type, such as a custom framework or a standard framework.

    " + "documentation":"

    Specifies whether the framework is a standard framework or a custom framework.

    " }, "complianceType":{ "shape":"ComplianceType", - "documentation":"

    The compliance type that the new custom framework supports, such as CIS or HIPAA.

    " + "documentation":"

    The compliance type that the framework supports, such as CIS or HIPAA.

    " }, "description":{ "shape":"FrameworkDescription", @@ -3094,7 +3129,7 @@ }, "controlSources":{ "shape":"ControlSources", - "documentation":"

    The sources that Audit Manager collects evidence from for the control.

    " + "documentation":"

    The control data sources where Audit Manager collects evidence from.

    " }, "controlSets":{ "shape":"ControlSets", @@ -3321,7 +3356,7 @@ "members":{ "control":{ "shape":"Control", - "documentation":"

    The name of the control that the GetControl API returned.

    " + "documentation":"

    The details of the control that the GetControl API returned.

    " } } }, @@ -3408,6 +3443,31 @@ } } }, + "GetEvidenceFileUploadUrlRequest":{ + "type":"structure", + "required":["fileName"], + "members":{ + "fileName":{ + "shape":"ManualEvidenceLocalFileName", + "documentation":"

    The file that you want to upload. For a list of supported file formats, see Supported file types for manual evidence in the Audit Manager User Guide.

    ", + "location":"querystring", + "locationName":"fileName" + } + } + }, + "GetEvidenceFileUploadUrlResponse":{ + "type":"structure", + "members":{ + "evidenceFileName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the uploaded manual evidence file that the presigned URL was generated for.

    " + }, + "uploadUrl":{ + "shape":"NonEmptyString", + "documentation":"

    The presigned URL that was generated.

    " + } + } + }, "GetEvidenceFolderRequest":{ "type":"structure", "required":[ @@ -3757,7 +3817,11 @@ }, "KeywordInputType":{ "type":"string", - "enum":["SELECT_FROM_LIST"] + "enum":[ + "SELECT_FROM_LIST", + "UPLOAD_FILE", + "INPUT_TEXT" + ] }, "KeywordValue":{ "type":"string", @@ -3893,7 +3957,7 @@ "members":{ "frameworkMetadataList":{ "shape":"FrameworkMetadataList", - "documentation":"

    The list of metadata objects for the framework.

    " + "documentation":"

    A list of metadata that the ListAssessmentFrameworks API returns for each framework.

    " }, "nextToken":{ "shape":"Token", @@ -3963,7 +4027,7 @@ "members":{ "assessmentMetadata":{ "shape":"ListAssessmentMetadata", - "documentation":"

    The metadata that's associated with the assessment.

    " + "documentation":"

    The metadata that the ListAssessments API returns for each assessment.

    " }, "nextToken":{ "shape":"Token", @@ -4104,7 +4168,7 @@ "members":{ "controlMetadataList":{ "shape":"ControlMetadataList", - "documentation":"

    The list of control metadata objects that the ListControls API returned.

    " + "documentation":"

    A list of metadata that the ListControls API returns for each control.

    " }, "nextToken":{ "shape":"Token", @@ -4205,10 +4269,18 @@ "members":{ "s3ResourcePath":{ "shape":"S3Url", - "documentation":"

    The Amazon S3 URL that points to a manual evidence object.

    " + "documentation":"

    The S3 URL of the object that's imported as manual evidence.

    " + }, + "textResponse":{ + "shape":"ManualEvidenceTextResponse", + "documentation":"

    The plain text response that's entered and saved as manual evidence.

    " + }, + "evidenceFileName":{ + "shape":"ManualEvidenceLocalFileName", + "documentation":"

    The name of the file that's uploaded as manual evidence. This name is populated using the evidenceFileName value from the GetEvidenceFileUploadUrl API response.

    " } }, - "documentation":"

    Evidence that's uploaded to Audit Manager manually.

    " + "documentation":"

    Evidence that's manually added to a control in Audit Manager. manualEvidence can be one of the following: evidenceFileName, s3ResourcePath, or textResponse.

    " }, "ManualEvidenceList":{ "type":"list", @@ -4216,6 +4288,18 @@ "max":50, "min":1 }, + "ManualEvidenceLocalFileName":{ + "type":"string", + "max":300, + "min":1, + "pattern":"[^\\/]*" + }, + "ManualEvidenceTextResponse":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^[\\w\\W\\s\\S]*$" + }, "MaxResults":{ "type":"integer", "documentation":"Max results in the page.", @@ -4480,7 +4564,8 @@ "DEFAULT_ASSESSMENT_REPORTS_DESTINATION", "DEFAULT_PROCESS_OWNERS", "EVIDENCE_FINDER_ENABLEMENT", - "DEREGISTRATION_POLICY" + "DEREGISTRATION_POLICY", + "DEFAULT_EXPORT_DESTINATION" ] }, "Settings":{ @@ -4496,7 +4581,7 @@ }, "defaultAssessmentReportsDestination":{ "shape":"AssessmentReportsDestination", - "documentation":"

    The default storage destination for assessment reports.

    " + "documentation":"

    The default S3 destination bucket for storing assessment reports.

    " }, "defaultProcessOwners":{ "shape":"Roles", @@ -4513,6 +4598,10 @@ "deregistrationPolicy":{ "shape":"DeregistrationPolicy", "documentation":"

    The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager.

    " + }, + "defaultExportDestination":{ + "shape":"DefaultExportDestination", + "documentation":"

    The default S3 destination bucket for storing evidence finder exports.

    " } }, "documentation":"

    The settings object that holds all supported Audit Manager settings.

    " @@ -4574,14 +4663,14 @@ "members":{ "keywordInputType":{ "shape":"KeywordInputType", - "documentation":"

    The input method for the keyword.

    " + "documentation":"

    The input method for the keyword.

    • SELECT_FROM_LIST is used when mapping a data source for automated evidence.

      • When keywordInputType is SELECT_FROM_LIST, a keyword must be selected to collect automated evidence. For example, this keyword can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

    • UPLOAD_FILE and INPUT_TEXT are only used when mapping a data source for manual evidence.

      • When keywordInputType is UPLOAD_FILE, a file must be uploaded as manual evidence.

      • When keywordInputType is INPUT_TEXT, text must be entered as manual evidence.

    " }, "keywordValue":{ "shape":"KeywordValue", - "documentation":"

    The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

    If you’re mapping a data source to a rule in Config, the keywordValue that you specify depends on the type of rule:

    • For managed rules, you can use the rule identifier as the keywordValue. You can find the rule identifier from the list of Config managed rules.

    • For custom rules, you form the keywordValue by adding the Custom_ prefix to the rule name. This prefix distinguishes the rule from a managed rule.

      • Custom rule name: my-custom-config-rule

        keywordValue: Custom_my-custom-config-rule

    • For service-linked rules, you form the keywordValue by adding the Custom_ prefix to the rule name. In addition, you remove the suffix ID that appears at the end of the rule name.

      • Service-linked rule name: CustomRuleForAccount-conformance-pack-szsm1uv0w

        keywordValue: Custom_CustomRuleForAccount-conformance-pack

      • Service-linked rule name: OrgConfigRule-s3-bucket-versioning-enabled-dbgzf8ba

        keywordValue: Custom_OrgConfigRule-s3-bucket-versioning-enabled

    " + "documentation":"

    The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

    If you’re mapping a data source to a rule in Config, the keywordValue that you specify depends on the type of rule:

    • For managed rules, you can use the rule identifier as the keywordValue. You can find the rule identifier from the list of Config managed rules. For some rules, the rule identifier is different from the rule name. For example, the rule name restricted-ssh has the following rule identifier: INCOMING_SSH_DISABLED. Make sure to use the rule identifier, not the rule name.

      Keyword example for managed rules:

    • For custom rules, you form the keywordValue by adding the Custom_ prefix to the rule name. This prefix distinguishes the custom rule from a managed rule.

      Keyword example for custom rules:

      • Custom rule name: my-custom-config-rule

        keywordValue: Custom_my-custom-config-rule

    • For service-linked rules, you form the keywordValue by adding the Custom_ prefix to the rule name. In addition, you remove the suffix ID that appears at the end of the rule name.

      Keyword examples for service-linked rules:

      • Service-linked rule name: CustomRuleForAccount-conformance-pack-szsm1uv0w

        keywordValue: Custom_CustomRuleForAccount-conformance-pack

      • Service-linked rule name: OrgConfigRule-s3-bucket-versioning-enabled-dbgzf8ba

        keywordValue: Custom_OrgConfigRule-s3-bucket-versioning-enabled

    The keywordValue is case sensitive. If you enter a value incorrectly, Audit Manager might not recognize the data source mapping. As a result, you might not successfully collect evidence from that data source as intended.

    Keep in mind the following requirements, depending on the data source type that you're using.

    1. For Config:

      • For managed rules, make sure that the keywordValue is the rule identifier in ALL_CAPS_WITH_UNDERSCORES. For example, CLOUDWATCH_LOG_GROUP_ENCRYPTED. For accuracy, we recommend that you reference the list of supported Config managed rules.

      • For custom rules, make sure that the keywordValue has the Custom_ prefix followed by the custom rule name. The format of the custom rule name itself may vary. For accuracy, we recommend that you visit the Config console to verify your custom rule name.

    2. For Security Hub: The format varies for Security Hub control names. For accuracy, we recommend that you reference the list of supported Security Hub controls.

    3. For Amazon Web Services API calls: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, iam_ListGroups. For accuracy, we recommend that you reference the list of supported API calls.

    4. For CloudTrail: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, cloudtrail_StartLogging. For accuracy, we recommend that you review the Amazon Web Service prefix and action names in the Service Authorization Reference.

    " } }, - "documentation":"

    The keyword to search for in CloudTrail logs, Config rules, Security Hub checks, and Amazon Web Services API names.

    To learn more about the supported keywords that you can use when mapping a control data source, see the following pages in the Audit Manager User Guide:

    " + "documentation":"

    A keyword that relates to the control data source.

    For manual evidence, this keyword indicates if the manual evidence is a file or text.

    For automated evidence, this keyword identifies a specific CloudTrail event, Config rule, Security Hub control, or Amazon Web Services API name.

    To learn more about the supported keywords that you can use when mapping a control data source, see the following pages in the Audit Manager User Guide:

    " }, "SourceName":{ "type":"string", @@ -5092,7 +5181,7 @@ }, "defaultAssessmentReportsDestination":{ "shape":"AssessmentReportsDestination", - "documentation":"

    The default storage destination for assessment reports.

    " + "documentation":"

    The default S3 destination bucket for storing assessment reports.

    " }, "defaultProcessOwners":{ "shape":"Roles", @@ -5109,6 +5198,10 @@ "deregistrationPolicy":{ "shape":"DeregistrationPolicy", "documentation":"

    The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager.

    " + }, + "defaultExportDestination":{ + "shape":"DefaultExportDestination", + "documentation":"

    The default S3 destination bucket for storing evidence finder exports.

    " } } }, diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index 6bd556fe56fa..3d5fb2d13ff4 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 202aea7ebfae..8bfa83d35b07 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 03e9663ac5e0..706c7930c23c 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index 8ed16c82b993..1621dc1e3b19 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupstorage/pom.xml b/services/backupstorage/pom.xml index 42b0c4f1dc52..cbacf949c312 100644 --- a/services/backupstorage/pom.xml +++ b/services/backupstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT backupstorage AWS Java SDK :: Services :: Backup Storage diff --git a/services/batch/pom.xml b/services/batch/pom.xml index b0a31d8a0db7..27f51a4796bc 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/billingconductor/pom.xml 
b/services/billingconductor/pom.xml index 64729b743968..517ff238f1ca 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 068a8a0905e0..c7d2cebecbad 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 84861843ae2b..abcb771ab1d0 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 355865594453..f2029ade545e 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index bb0f5fdd7cc3..a8bf09cdc673 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 4ffcb331492a..b3e3e9b73bbd 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmeetings/pom.xml 
b/services/chimesdkmeetings/pom.xml index f852d1ff3426..89ef092cd5ba 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index 42ffb8a4f368..ae61018fdfec 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index 138c8b16d4e9..b8dda5edbd38 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index c8d00e2f65af..6a3416c060e6 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 4bd5ab8b96bc..95a249c34826 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index 0ca125cb8198..bac7f2dff2b4 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml 
index 2528757d9a13..48cc0db7db69 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index c5a80f871b3e..36187f75019d 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 72e35e8937e2..9b07253c5b67 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -1745,6 +1745,10 @@ "IncludeNestedStacks":{ "shape":"IncludeNestedStacks", "documentation":"

    Creates a change set for all of the nested stacks specified in the template. The default behavior of this action is set to False. To include nested stacks in a change set, specify True.

    " + }, + "OnStackFailure":{ + "shape":"OnStackFailure", + "documentation":"

    Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

    • DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED.

    • DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation.

    • ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation.

    For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.

    " } }, "documentation":"

    The input for the CreateChangeSet action.

    " @@ -2367,6 +2371,10 @@ "RootChangeSetId":{ "shape":"ChangeSetId", "documentation":"

    Specifies the change set ID of the root change set in the current nested change set hierarchy.

    " + }, + "OnStackFailure":{ + "shape":"OnStackFailure", + "documentation":"

    Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

    • DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED.

    • DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation.

    • ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation.

    " } }, "documentation":"

    The output for the DescribeChangeSet action.

    " @@ -3043,7 +3051,7 @@ }, "DisableRollback":{ "shape":"DisableRollback", - "documentation":"

    Preserves the state of previously provisioned resources when an operation fails.

    Default: True

    " + "documentation":"

    Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

    • True - if the stack creation fails, do nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to the CreateChangeSet API operation.

    • False - if the stack creation fails, roll back the stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the CreateChangeSet API operation.

    Default: True

    " } }, "documentation":"

    The input for the ExecuteChangeSet action.

    " @@ -3992,6 +4000,14 @@ "DELETE" ] }, + "OnStackFailure":{ + "type":"string", + "enum":[ + "DO_NOTHING", + "ROLLBACK", + "DELETE" + ] + }, "OperationIdAlreadyExistsException":{ "type":"structure", "members":{ diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index b2e143a48660..890fac88eb6f 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index c708437918f2..06be29d2e7c0 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 49d4757f3ae8..23b4087dd4b7 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 980f0da72446..7e6d91ac300a 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 7739185e734d..6fd93fca34ae 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 7d44763e28bf..be0492893c1e 100644 --- a/services/cloudtrail/pom.xml +++ 
b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index f413d344ea0c..9ded7eeb1e5f 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -287,7 +287,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

    Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query status. You must specify an ARN for EventDataStore, and a value for QueryID.

    ", + "documentation":"

    Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query status. If the query results were delivered to an S3 bucket, the response also provides the S3 URI and the delivery status.

    You must specify either a QueryID or a QueryAlias. Specifying the QueryAlias parameter returns information about the last query run for the alias.

    ", "idempotent":true }, "DescribeTrails":{ @@ -419,7 +419,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

    Gets event data results of a query. You must specify the QueryID value returned by the StartQuery operation, and an ARN for EventDataStore.

    " + "documentation":"

    Gets event data results of a query. You must specify the QueryID value returned by the StartQuery operation.

    " }, "GetResourcePolicy":{ "name":"GetResourcePolicy", @@ -887,7 +887,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"

    Starts a CloudTrail Lake query. The required QueryStatement parameter provides your SQL query, enclosed in single quotation marks. Use the optional DeliveryS3Uri parameter to deliver the query results to an S3 bucket.

    ", + "documentation":"

    Starts a CloudTrail Lake query. Use the QueryStatement parameter to provide your SQL query, enclosed in single quotation marks. Use the optional DeliveryS3Uri parameter to deliver the query results to an S3 bucket.

    StartQuery requires that you specify either the QueryStatement parameter, or a QueryAlias and any QueryParameters. In the current release, the QueryAlias and QueryParameters parameters are used only for the queries that populate the CloudTrail Lake dashboards.

    ", "idempotent":true }, "StopEventDataStoreIngestion":{ @@ -983,6 +983,7 @@ "input":{"shape":"UpdateEventDataStoreRequest"}, "output":{"shape":"UpdateEventDataStoreResponse"}, "errors":[ + {"shape":"EventDataStoreAlreadyExistsException"}, {"shape":"EventDataStoreARNInvalidException"}, {"shape":"EventDataStoreNotFoundException"}, {"shape":"InvalidEventSelectorsException"}, @@ -1002,7 +1003,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"OrganizationNotInAllFeaturesModeException"} ], - "documentation":"

    Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 90 and 2557. By default, TerminationProtection is enabled.

    For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management and data events in your event data store. For more information about AdvancedEventSelectors, see PutEventSelectorsRequest$AdvancedEventSelectors.

    For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store.

    ", + "documentation":"

    Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 90 and 2557. By default, TerminationProtection is enabled.

    For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management and data events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors.

    For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store.

    ", "idempotent":true }, "UpdateTrail":{ @@ -1135,7 +1136,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

    A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

    For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

    For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

    • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

    • eventSource - For filtering management events only. This can be set only to NotEquals kms.amazonaws.com.

    • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

    • eventCategory - This is required and must be set to Equals.

      • For CloudTrail event records, the value must be Management or Data.

      • For Config configuration items, the value must be ConfigurationItem.

      • For Audit Manager evidence, the value must be Evidence.

      • For non-Amazon Web Services events, the value must be ActivityAuditLog.

    • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

      • AWS::DynamoDB::Table

      • AWS::Lambda::Function

      • AWS::S3::Object

      • AWS::CloudTrail::Channel

      • AWS::Cognito::IdentityPool

      • AWS::DynamoDB::Stream

      • AWS::EC2::Snapshot

      • AWS::FinSpace::Environment

      • AWS::Glue::Table

      • AWS::GuardDuty::Detector

      • AWS::KendraRanking::ExecutionPlan

      • AWS::ManagedBlockchain::Node

      • AWS::SageMaker::ExperimentTrialComponent

      • AWS::SageMaker::FeatureGroup

      • AWS::S3::AccessPoint

      • AWS::S3ObjectLambda::AccessPoint

      • AWS::S3Outposts::Object

      You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

    • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

      The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

      • arn:<partition>:s3:::<bucket_name>/

      • arn:<partition>:s3:::<bucket_name>/<object_path>/

      When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

      When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

      When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

      When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

      When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

      When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

      When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

      When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

      When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

      When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

      When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

      When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

      When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

      When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

      • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

      • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

      When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

      When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

    " + "documentation":"

    A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

    For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

    For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

    • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

    • eventSource - For filtering management events only. This can be set only to NotEquals kms.amazonaws.com.

    • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

    • eventCategory - This is required and must be set to Equals.

      • For CloudTrail event records, the value must be Management or Data.

      • For Config configuration items, the value must be ConfigurationItem.

      • For Audit Manager evidence, the value must be Evidence.

      • For non-Amazon Web Services events, the value must be ActivityAuditLog.

    • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

      • AWS::DynamoDB::Table

      • AWS::Lambda::Function

      • AWS::S3::Object

      • AWS::CloudTrail::Channel

      • AWS::CodeWhisperer::Profile

      • AWS::Cognito::IdentityPool

      • AWS::DynamoDB::Stream

      • AWS::EC2::Snapshot

      • AWS::EMRWAL::Workspace

      • AWS::FinSpace::Environment

      • AWS::Glue::Table

      • AWS::GuardDuty::Detector

      • AWS::KendraRanking::ExecutionPlan

      • AWS::ManagedBlockchain::Node

      • AWS::SageMaker::ExperimentTrialComponent

      • AWS::SageMaker::FeatureGroup

      • AWS::S3::AccessPoint

      • AWS::S3ObjectLambda::AccessPoint

      • AWS::S3Outposts::Object

      You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

    • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

      The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

      • arn:<partition>:s3:::<bucket_name>/

      • arn:<partition>:s3:::<bucket_name>/<object_path>/

      When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

      When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

      When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

      When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>

      When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

      When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

      When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

      When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:emrwal:<region>::workspace/<workspace_name>

      When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

      When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

      When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

      When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

      When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

      When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

      When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

      When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

      • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

      • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

      When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

      When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

      • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

    " }, "Equals":{ "shape":"Operator", @@ -1517,7 +1518,7 @@ }, "SnsTopicName":{ "shape":"String", - "documentation":"

    This field is no longer in use. Use SnsTopicARN.

    ", + "documentation":"

    This field is no longer in use. Use SnsTopicARN.

    ", "deprecated":true }, "SnsTopicARN":{ @@ -1564,7 +1565,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

    The resource type in which you want to log data events. You can specify the following basic event selector resource types:

    • AWS::DynamoDB::Table

    • AWS::Lambda::Function

    • AWS::S3::Object

    The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector$Field.

    • AWS::CloudTrail::Channel

    • AWS::Cognito::IdentityPool

    • AWS::DynamoDB::Stream

    • AWS::EC2::Snapshot

    • AWS::FinSpace::Environment

    • AWS::Glue::Table

    • AWS::GuardDuty::Detector

    • AWS::KendraRanking::ExecutionPlan

    • AWS::ManagedBlockchain::Node

    • AWS::SageMaker::ExperimentTrialComponent

    • AWS::SageMaker::FeatureGroup

    • AWS::S3::AccessPoint

    • AWS::S3ObjectLambda::AccessPoint

    • AWS::S3Outposts::Object

    " + "documentation":"

    The resource type in which you want to log data events. You can specify the following basic event selector resource types:

    • AWS::DynamoDB::Table

    • AWS::Lambda::Function

    • AWS::S3::Object

    The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector.

    • AWS::CloudTrail::Channel

    • AWS::CodeWhisperer::Profile

    • AWS::Cognito::IdentityPool

    • AWS::DynamoDB::Stream

    • AWS::EC2::Snapshot

    • AWS::EMRWAL::Workspace

    • AWS::FinSpace::Environment

    • AWS::Glue::Table

    • AWS::GuardDuty::Detector

    • AWS::KendraRanking::ExecutionPlan

    • AWS::ManagedBlockchain::Node

    • AWS::SageMaker::ExperimentTrialComponent

    • AWS::SageMaker::FeatureGroup

    • AWS::S3::AccessPoint

    • AWS::S3ObjectLambda::AccessPoint

    • AWS::S3Outposts::Object

    " }, "Values":{ "shape":"DataResourceValues", @@ -1689,7 +1690,6 @@ }, "DescribeQueryRequest":{ "type":"structure", - "required":["QueryId"], "members":{ "EventDataStore":{ "shape":"EventDataStoreArn", @@ -1700,6 +1700,10 @@ "QueryId":{ "shape":"UUID", "documentation":"

    The query ID.

    " + }, + "QueryAlias":{ + "shape":"QueryAlias", + "documentation":"

    The alias that identifies a query template.

    " } } }, @@ -3401,6 +3405,12 @@ }, "documentation":"

    A SQL string of criteria about events that you want to collect in an event data store.

    " }, + "QueryAlias":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9._\\-]*$" + }, "QueryIdNotFoundException":{ "type":"structure", "members":{ @@ -3408,6 +3418,18 @@ "documentation":"

    The query ID does not exist or does not map to a query.

    ", "exception":true }, + "QueryParameter":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "QueryParameters":{ + "type":"list", + "member":{"shape":"QueryParameter"}, + "max":10, + "min":1 + }, "QueryResultColumn":{ "type":"map", "key":{"shape":"QueryResultKey"}, @@ -3842,7 +3864,6 @@ }, "StartQueryRequest":{ "type":"structure", - "required":["QueryStatement"], "members":{ "QueryStatement":{ "shape":"QueryStatement", @@ -3851,6 +3872,14 @@ "DeliveryS3Uri":{ "shape":"DeliveryS3Uri", "documentation":"

    The URI for the S3 bucket where CloudTrail delivers the query results.

    " + }, + "QueryAlias":{ + "shape":"QueryAlias", + "documentation":"

    The alias that identifies a query template.

    " + }, + "QueryParameters":{ + "shape":"QueryParameters", + "documentation":"

    The query parameters for the specified QueryAlias.

    " } } }, @@ -4003,7 +4032,7 @@ }, "SnsTopicName":{ "shape":"String", - "documentation":"

    This field is no longer in use. Use SnsTopicARN.

    ", + "documentation":"

    This field is no longer in use. Use SnsTopicARN.

    ", "deprecated":true }, "SnsTopicARN":{ @@ -4315,7 +4344,7 @@ }, "SnsTopicName":{ "shape":"String", - "documentation":"

    This field is no longer in use. Use UpdateTrailResponse$SnsTopicARN.

    ", + "documentation":"

    This field is no longer in use. Use SnsTopicARN.

    ", "deprecated":true }, "SnsTopicARN":{ @@ -4358,5 +4387,5 @@ "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " } }, - "documentation":"CloudTrail

    This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail.

    CloudTrail is a web service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the Amazon Web Services API call, the source IP address, the request parameters, and the response elements returned by the service.

    As an alternative to the API, you can use one of the Amazon Web Services SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide programmatic access to CloudTrail. For example, the SDKs handle cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools to Build on Amazon Web Services.

    See the CloudTrail User Guide for information about the data that is included with each Amazon Web Services API call listed in the log files.

    Actions available for CloudTrail trails

    The following actions are available for CloudTrail trails.

    Actions available for CloudTrail event data stores

    The following actions are available for CloudTrail event data stores.

    Actions available for CloudTrail channels

    The following actions are available for CloudTrail channels.

    Actions available for managing delegated administrators

    The following actions are available for adding or removing a delegated administrator to manage an Organizations organization’s CloudTrail resources.

    " + "documentation":"CloudTrail

    This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail.

    CloudTrail is a web service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the Amazon Web Services API call, the source IP address, the request parameters, and the response elements returned by the service.

    As an alternative to the API, you can use one of the Amazon Web Services SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide programmatic access to CloudTrail. For example, the SDKs handle cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools to Build on Amazon Web Services.

    See the CloudTrail User Guide for information about the data that is included with each Amazon Web Services API call listed in the log files.

    " } diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index 3e79f026ca26..4cdbd8a54b65 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index 8adf72270fef..a347c526e3e4 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index bd0be7956e86..b67be6e57f68 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 229b397d6a57..b0947c696bd8 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 2edfe62e276f..236b2d69c2bc 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 9c4e47df4daf..07ab6fd71852 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codebuild AWS Java SDK 
:: Services :: AWS Code Build diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index e6a0962e56d8..2988c5948dab 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 97fe29006e55..42337999c46e 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 75283bf5795a..7a2d9a7f3a58 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 349a0dc31155..3cbfef72cc54 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index a220ebd405ec..6fb33d0188cd 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml new file mode 100644 index 000000000000..851d935d9bd0 --- /dev/null +++ b/services/codegurusecurity/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.90-SNAPSHOT + + 
codegurusecurity + AWS Java SDK :: Services :: Code Guru Security + The AWS Java SDK for Code Guru Security module holds the client classes that are used for + communicating with Code Guru Security. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codegurusecurity + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..9c6f96dac9a8 --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", 
+ "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codeguru-security.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-tests.json b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..df56b8cd3835 --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { 
+ "endpoint": { + "url": "https://codeguru-security-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 
with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://codeguru-security.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/paginators-1.json b/services/codegurusecurity/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..03e1cbfce771 --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "GetFindings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", 
+ "result_key": "findings" + }, + "ListFindingsMetrics": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "findingsMetrics" + }, + "ListScans": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "summaries" + } + } +} diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/service-2.json b/services/codegurusecurity/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..024d90e9101e --- /dev/null +++ b/services/codegurusecurity/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1514 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"codeguru-security", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon CodeGuru Security", + "serviceId":"CodeGuru Security", + "signatureVersion":"v4", + "signingName":"codeguru-security", + "uid":"codeguru-security-2018-05-10" + }, + "operations":{ + "BatchGetFindings":{ + "name":"BatchGetFindings", + "http":{ + "method":"POST", + "requestUri":"/batchGetFindings", + "responseCode":200 + }, + "input":{"shape":"BatchGetFindingsRequest"}, + "output":{"shape":"BatchGetFindingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns a list of all requested findings.

    " + }, + "CreateScan":{ + "name":"CreateScan", + "http":{ + "method":"POST", + "requestUri":"/scans", + "responseCode":200 + }, + "input":{"shape":"CreateScanRequest"}, + "output":{"shape":"CreateScanResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Use to create a scan using code uploaded to an S3 bucket.

    " + }, + "CreateUploadUrl":{ + "name":"CreateUploadUrl", + "http":{ + "method":"POST", + "requestUri":"/uploadUrl", + "responseCode":200 + }, + "input":{"shape":"CreateUploadUrlRequest"}, + "output":{"shape":"CreateUploadUrlResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Generates a pre-signed URL and request headers used to upload a code resource.

    You can upload your code resource to the URL and add the request headers using any HTTP client.

    " + }, + "GetAccountConfiguration":{ + "name":"GetAccountConfiguration", + "http":{ + "method":"GET", + "requestUri":"/accountConfiguration/get", + "responseCode":200 + }, + "input":{"shape":"GetAccountConfigurationRequest"}, + "output":{"shape":"GetAccountConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Use to get account level configuration.

    " + }, + "GetFindings":{ + "name":"GetFindings", + "http":{ + "method":"GET", + "requestUri":"/findings/{scanName}", + "responseCode":200 + }, + "input":{"shape":"GetFindingsRequest"}, + "output":{"shape":"GetFindingsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns a list of all findings generated by a particular scan.

    " + }, + "GetMetricsSummary":{ + "name":"GetMetricsSummary", + "http":{ + "method":"GET", + "requestUri":"/metrics/summary", + "responseCode":200 + }, + "input":{"shape":"GetMetricsSummaryRequest"}, + "output":{"shape":"GetMetricsSummaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns top level metrics about an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings.

    " + }, + "GetScan":{ + "name":"GetScan", + "http":{ + "method":"GET", + "requestUri":"/scans/{scanName}", + "responseCode":200 + }, + "input":{"shape":"GetScanRequest"}, + "output":{"shape":"GetScanResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns details about a scan, including whether or not a scan has completed.

    " + }, + "ListFindingsMetrics":{ + "name":"ListFindingsMetrics", + "http":{ + "method":"GET", + "requestUri":"/metrics/findings", + "responseCode":200 + }, + "input":{"shape":"ListFindingsMetricsRequest"}, + "output":{"shape":"ListFindingsMetricsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns metrics about all findings in an account within a specified time range.

    " + }, + "ListScans":{ + "name":"ListScans", + "http":{ + "method":"GET", + "requestUri":"/scans", + "responseCode":200 + }, + "input":{"shape":"ListScansRequest"}, + "output":{"shape":"ListScansResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns a list of all the scans in an account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Returns a list of all tags associated with a scan.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Use to add one or more tags to an existing scan.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Use to remove one or more tags from an existing scan.

    ", + "idempotent":true + }, + "UpdateAccountConfiguration":{ + "name":"UpdateAccountConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/updateAccountConfiguration", + "responseCode":200 + }, + "input":{"shape":"UpdateAccountConfigurationRequest"}, + "output":{"shape":"UpdateAccountConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Use to update account-level configuration with an encryption key.

    " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

    The identifier for the error.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    The identifier for the resource you don't have access to.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of resource you don't have access to.

    " + } + }, + "documentation":"

    You do not have sufficient access to perform this action.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountFindingsMetric":{ + "type":"structure", + "members":{ + "closedFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

    The number of closed findings of each severity in an account on the specified date.

    " + }, + "date":{ + "shape":"Timestamp", + "documentation":"

    The date from which the finding metrics were retrieved.

    " + }, + "meanTimeToClose":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

    The average time it takes to close findings of each severity in days.

    " + }, + "newFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

    The number of new findings of each severity in account on the specified date.

    " + }, + "openFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

    The number of open findings of each severity in an account as of the specified date.

    " + } + }, + "documentation":"

    A summary of findings metrics in an account.

    " + }, + "AnalysisType":{ + "type":"string", + "enum":[ + "Security", + "All" + ] + }, + "BatchGetFindingsError":{ + "type":"structure", + "required":[ + "errorCode", + "findingId", + "message", + "scanName" + ], + "members":{ + "errorCode":{ + "shape":"ErrorCode", + "documentation":"

    A code associated with the type of error.

    " + }, + "findingId":{ + "shape":"String", + "documentation":"

    The finding ID of the finding that was not fetched.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Describes the error.

    " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan that generated the finding.

    " + } + }, + "documentation":"

    Contains information about the error that caused a finding to fail to be retrieved.

    " + }, + "BatchGetFindingsErrors":{ + "type":"list", + "member":{"shape":"BatchGetFindingsError"} + }, + "BatchGetFindingsRequest":{ + "type":"structure", + "required":["findingIdentifiers"], + "members":{ + "findingIdentifiers":{ + "shape":"FindingIdentifiers", + "documentation":"

    A list of finding identifiers. Each identifier consists of a scanName and a findingId. You retrieve the findingId when you call GetFindings.

    " + } + } + }, + "BatchGetFindingsResponse":{ + "type":"structure", + "required":[ + "failedFindings", + "findings" + ], + "members":{ + "failedFindings":{ + "shape":"BatchGetFindingsErrors", + "documentation":"

    A list of errors for individual findings which were not fetched. Each BatchGetFindingsError contains the scanName, findingId, errorCode and error message.

    " + }, + "findings":{ + "shape":"Findings", + "documentation":"

    A list of all requested findings.

    " + } + } + }, + "CategoriesWithMostFindings":{ + "type":"list", + "member":{"shape":"CategoryWithFindingNum"}, + "max":5, + "min":0 + }, + "CategoryWithFindingNum":{ + "type":"structure", + "members":{ + "categoryName":{ + "shape":"String", + "documentation":"

    The name of the finding category. A finding category is determined by the detector that detected the finding.

    " + }, + "findingNumber":{ + "shape":"Integer", + "documentation":"

    The number of open findings in the category.

    " + } + }, + "documentation":"

    Information about a finding category with open findings.

    " + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\S]+$" + }, + "CodeLine":{ + "type":"structure", + "members":{ + "content":{ + "shape":"String", + "documentation":"

    The code that contains a vulnerability.

    " + }, + "number":{ + "shape":"Integer", + "documentation":"

    The code line number.

    " + } + }, + "documentation":"

    The line of code where a finding was detected.

    " + }, + "CodeSnippet":{ + "type":"list", + "member":{"shape":"CodeLine"} + }, + "ConflictException":{ + "type":"structure", + "required":[ + "errorCode", + "message", + "resourceId", + "resourceType" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

    The identifier for the error.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    The identifier for the service resource associated with the request.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of resource associated with the request.

    " + } + }, + "documentation":"

    The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateScanRequest":{ + "type":"structure", + "required":[ + "resourceId", + "scanName" + ], + "members":{ + "analysisType":{ + "shape":"AnalysisType", + "documentation":"

    The type of analysis you want CodeGuru Security to perform in the scan, either Security or All. The Security type only generates findings related to security. The All type generates both security findings and quality findings. Defaults to Security type if missing.

    " + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    The idempotency token for the request. Amazon CodeGuru Security uses this value to prevent the accidental creation of duplicate scans if there are failures and retries.

    ", + "idempotencyToken":true + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for an input resource used to create a scan.

    " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The unique name that CodeGuru Security uses to track revisions across multiple scans of the same resource. Only allowed for a STANDARD scan type. If not specified, it will be auto generated.

    " + }, + "scanType":{ + "shape":"ScanType", + "documentation":"

    The type of scan, either Standard or Express. Defaults to Standard type if missing.

    Express scans run on limited resources and use a limited set of detectors to analyze your code in near-real time. Standard scans have standard resource limits and use the full set of detectors to analyze your code.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag a scan. A tag is a custom attribute label with two parts:

    • A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive.

    • An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive.

    " + } + } + }, + "CreateScanResponse":{ + "type":"structure", + "required":[ + "resourceId", + "runId", + "scanName", + "scanState" + ], + "members":{ + "resourceId":{ + "shape":"ResourceId", + "documentation":"

    The identifier for the resource object that contains resources that were scanned.

    " + }, + "runId":{ + "shape":"Uuid", + "documentation":"

    UUID that identifies the individual scan run.

    " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan.

    " + }, + "scanNameArn":{ + "shape":"ScanNameArn", + "documentation":"

    The ARN for the scan name.

    " + }, + "scanState":{ + "shape":"ScanState", + "documentation":"

    The current state of the scan. Returns either InProgress, Successful, or Failed.

    " + } + } + }, + "CreateUploadUrlRequest":{ + "type":"structure", + "required":["scanName"], + "members":{ + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan that will use the uploaded resource. CodeGuru Security uses the unique scan name to track revisions across multiple scans of the same resource. Use this scanName when you call CreateScan on the code resource you upload to this URL.

    " + } + } + }, + "CreateUploadUrlResponse":{ + "type":"structure", + "required":[ + "codeArtifactId", + "requestHeaders", + "s3Url" + ], + "members":{ + "codeArtifactId":{ + "shape":"Uuid", + "documentation":"

    The identifier for the uploaded code resource.

    " + }, + "requestHeaders":{ + "shape":"RequestHeaderMap", + "documentation":"

    A set of key-value pairs that contain the required headers when uploading your resource.

    " + }, + "s3Url":{ + "shape":"S3Url", + "documentation":"

    A pre-signed S3 URL. You can upload the code file you want to scan and add the required requestHeaders using any HTTP client.

    " + } + } + }, + "DetectorTags":{ + "type":"list", + "member":{"shape":"String"} + }, + "Double":{ + "type":"double", + "box":true + }, + "EncryptionConfig":{ + "type":"structure", + "members":{ + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The KMS key ARN to use for encryption. This must be provided as a header when uploading your code resource.

    " + } + }, + "documentation":"

    Information about account-level configuration.

    " + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "DUPLICATE_IDENTIFIER", + "ITEM_DOES_NOT_EXIST", + "INTERNAL_ERROR", + "INVALID_FINDING_ID", + "INVALID_SCAN_NAME" + ] + }, + "FilePath":{ + "type":"structure", + "members":{ + "codeSnippet":{ + "shape":"CodeSnippet", + "documentation":"

    A list of CodeLine objects that describe where the security vulnerability appears in your code.

    " + }, + "endLine":{ + "shape":"Integer", + "documentation":"

    The last line number of the code snippet where the security vulnerability appears in your code.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The name of the file.

    " + }, + "path":{ + "shape":"String", + "documentation":"

    The path to the resource with the security vulnerability.

    " + }, + "startLine":{ + "shape":"Integer", + "documentation":"

    The first line number of the code snippet where the security vulnerability appears in your code.

    " + } + }, + "documentation":"

    Information about the location of security vulnerabilities that Amazon CodeGuru Security detected in your code.

    " + }, + "Finding":{ + "type":"structure", + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the finding was created.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    A description of the finding.

    " + }, + "detectorId":{ + "shape":"String", + "documentation":"

    The identifier for the detector that detected the finding in your code. A detector is a defined rule based on industry standards and AWS best practices.

    " + }, + "detectorName":{ + "shape":"String", + "documentation":"

    The name of the detector that identified the security vulnerability in your code.

    " + }, + "detectorTags":{ + "shape":"DetectorTags", + "documentation":"

    One or more tags or categorizations that are associated with a detector. These tags are defined by type, programming language, or other classification such as maintainability or consistency.

    " + }, + "generatorId":{ + "shape":"String", + "documentation":"

    The identifier for the component that generated a finding such as AWSCodeGuruSecurity or AWSInspector.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The identifier for a finding.

    " + }, + "remediation":{ + "shape":"Remediation", + "documentation":"

    An object that contains the details about how to remediate a finding.

    " + }, + "resource":{ + "shape":"Resource", + "documentation":"

    The resource where Amazon CodeGuru Security detected a finding.

    " + }, + "ruleId":{ + "shape":"String", + "documentation":"

    The identifier for the rule that generated the finding.

    " + }, + "severity":{ + "shape":"Severity", + "documentation":"

    The severity of the finding.

    " + }, + "status":{ + "shape":"Status", + "documentation":"

    The status of the finding. A finding status can be open or closed.

    " + }, + "title":{ + "shape":"String", + "documentation":"

    The title of the finding.

    " + }, + "type":{ + "shape":"String", + "documentation":"

    The type of finding.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the finding was last updated. Findings are updated when you remediate them or when the finding code location changes.

    " + }, + "vulnerability":{ + "shape":"Vulnerability", + "documentation":"

    An object that describes the detected security vulnerability.

    " + } + }, + "documentation":"

    Information about a finding that was detected in your code.

    " + }, + "FindingIdentifier":{ + "type":"structure", + "required":[ + "findingId", + "scanName" + ], + "members":{ + "findingId":{ + "shape":"String", + "documentation":"

    The identifier for a finding.

    " + }, + "scanName":{ + "shape":"String", + "documentation":"

    The name of the scan that generated the finding.

    " + } + }, + "documentation":"

    An object that contains information about a finding and the scan that generated it.

    " + }, + "FindingIdentifiers":{ + "type":"list", + "member":{"shape":"FindingIdentifier"}, + "max":25, + "min":1 + }, + "FindingMetricsValuePerSeverity":{ + "type":"structure", + "members":{ + "critical":{ + "shape":"Double", + "documentation":"

    The severity of the finding is critical and should be addressed immediately.

    " + }, + "high":{ + "shape":"Double", + "documentation":"

    The severity of the finding is high and should be addressed as a near-term priority.

    " + }, + "info":{ + "shape":"Double", + "documentation":"

    The finding is related to quality or readability improvements and not considered actionable.

    " + }, + "low":{ + "shape":"Double", + "documentation":"

    The severity of the finding is low and does not require action on its own.

    " + }, + "medium":{ + "shape":"Double", + "documentation":"

    The severity of the finding is medium and should be addressed as a mid-term priority.

    " + } + }, + "documentation":"

    The severity of the issue in the code that generated a finding.

    " + }, + "Findings":{ + "type":"list", + "member":{"shape":"Finding"} + }, + "FindingsMetricList":{ + "type":"list", + "member":{"shape":"AccountFindingsMetric"} + }, + "GetAccountConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountConfigurationResponse":{ + "type":"structure", + "required":["encryptionConfig"], + "members":{ + "encryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    An EncryptionConfig object that contains the KMS key ARN to use for encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify your own key, call UpdateAccountConfiguration.

    " + } + } + }, + "GetFindingsRequest":{ + "type":"structure", + "required":["scanName"], + "members":{ + "maxResults":{ + "shape":"GetFindingsRequestMaxResultsInteger", + "documentation":"

    The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan you want to retrieve findings from.

    ", + "location":"uri", + "locationName":"scanName" + }, + "status":{ + "shape":"Status", + "documentation":"

    The status of the findings you want to get. Pass either Open, Closed, or All.

    ", + "location":"querystring", + "locationName":"status" + } + } + }, + "GetFindingsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "GetFindingsResponse":{ + "type":"structure", + "members":{ + "findings":{ + "shape":"Findings", + "documentation":"

    A list of findings generated by the specified scan.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token. You can use this in future calls to GetFindings to continue listing results after the current page.

    " + } + } + }, + "GetMetricsSummaryRequest":{ + "type":"structure", + "required":["date"], + "members":{ + "date":{ + "shape":"Timestamp", + "documentation":"

    The date you want to retrieve summary metrics from, rounded to the nearest day. The date must be within the past two years since metrics data is only stored for two years. If a date outside of this range is passed, the response will be empty.

    ", + "location":"querystring", + "locationName":"date" + } + } + }, + "GetMetricsSummaryResponse":{ + "type":"structure", + "members":{ + "metricsSummary":{ + "shape":"MetricsSummary", + "documentation":"

    The summary metrics from the specified date.

    " + } + } + }, + "GetScanRequest":{ + "type":"structure", + "required":["scanName"], + "members":{ + "runId":{ + "shape":"Uuid", + "documentation":"

    UUID that identifies the individual scan run you want to view details about. You retrieve this when you call the CreateScan operation. Defaults to the latest scan run if missing.

    ", + "location":"querystring", + "locationName":"runId" + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan you want to view details about.

    ", + "location":"uri", + "locationName":"scanName" + } + } + }, + "GetScanResponse":{ + "type":"structure", + "required":[ + "analysisType", + "createdAt", + "runId", + "scanName", + "scanState" + ], + "members":{ + "analysisType":{ + "shape":"AnalysisType", + "documentation":"

    The type of analysis CodeGuru Security performed in the scan, either Security or All. The Security type only generates findings related to security. The All type generates both security findings and quality findings.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time the scan was created.

    " + }, + "numberOfRevisions":{ + "shape":"Long", + "documentation":"

    The number of times a scan has been re-run on a revised resource.

    " + }, + "runId":{ + "shape":"Uuid", + "documentation":"

    UUID that identifies the individual scan run.

    " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan.

    " + }, + "scanNameArn":{ + "shape":"ScanNameArn", + "documentation":"

    The ARN for the scan name.

    " + }, + "scanState":{ + "shape":"ScanState", + "documentation":"

    The current state of the scan. Returns either InProgress, Successful, or Failed.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the scan was last updated. Only available for STANDARD scan types.

    " + } + } + }, + "HeaderKey":{ + "type":"string", + "min":1 + }, + "HeaderValue":{ + "type":"string", + "min":1 + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "error":{ + "shape":"String", + "documentation":"

    The internal error encountered by the server.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + } + }, + "documentation":"

    The server encountered an internal error and is unable to complete the request.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:aws:kms:[\\S]+:[\\d]{12}:key\\/(([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})|(mrk-[0-9a-zA-Z]{32}))$" + }, + "ListFindingsMetricsRequest":{ + "type":"structure", + "required":[ + "endDate", + "startDate" + ], + "members":{ + "endDate":{ + "shape":"Timestamp", + "documentation":"

    The end date of the interval which you want to retrieve metrics from.

    ", + "location":"querystring", + "locationName":"endDate" + }, + "maxResults":{ + "shape":"ListFindingsMetricsRequestMaxResultsInteger", + "documentation":"

    The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

    The start date of the interval which you want to retrieve metrics from.

    ", + "location":"querystring", + "locationName":"startDate" + } + } + }, + "ListFindingsMetricsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListFindingsMetricsResponse":{ + "type":"structure", + "members":{ + "findingsMetrics":{ + "shape":"FindingsMetricList", + "documentation":"

    A list of AccountFindingsMetric objects retrieved from the specified time interval.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token. You can use this in future calls to ListFindingMetrics to continue listing results after the current page.

    " + } + } + }, + "ListScansRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListScansRequestMaxResultsInteger", + "documentation":"

    The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListScansRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListScansResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token. You can use this in future calls to ListScans to continue listing results after the current page.

    " + }, + "summaries":{ + "shape":"ScanSummaries", + "documentation":"

    A list of ScanSummary objects with information about all scans in an account.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ScanNameArn", + "documentation":"

    The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an existing scan. A tag is a custom attribute label with two parts:

    • A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive.

    • An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive.

    " + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "MetricsSummary":{ + "type":"structure", + "members":{ + "categoriesWithMostFindings":{ + "shape":"CategoriesWithMostFindings", + "documentation":"

    A list of CategoryWithFindingNum objects for the top 5 finding categories with the most open findings in an account.

    " + }, + "date":{ + "shape":"Timestamp", + "documentation":"

    The date from which the metrics summary information was retrieved.

    " + }, + "openFindings":{ + "shape":"FindingMetricsValuePerSeverity", + "documentation":"

    The number of open findings of each severity in an account.

    " + }, + "scansWithMostOpenCriticalFindings":{ + "shape":"ScansWithMostOpenCriticalFindings", + "documentation":"

    A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open critical findings in an account.

    " + }, + "scansWithMostOpenFindings":{ + "shape":"ScansWithMostOpenFindings", + "documentation":"

    A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open findings in an account.

    " + } + }, + "documentation":"

    Information about summary metrics in an account.

    " + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[\\S]+$" + }, + "Recommendation":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

    The recommended course of action to remediate the finding.

    " + }, + "url":{ + "shape":"String", + "documentation":"

    The URL address to the recommendation for remediating the finding.

    " + } + }, + "documentation":"

    Information about the recommended course of action to remediate a finding.

    " + }, + "ReferenceUrls":{ + "type":"list", + "member":{"shape":"String"} + }, + "RelatedVulnerabilities":{ + "type":"list", + "member":{"shape":"String"} + }, + "Remediation":{ + "type":"structure", + "members":{ + "recommendation":{ + "shape":"Recommendation", + "documentation":"

    An object that contains information about the recommended course of action to remediate a finding.

    " + }, + "suggestedFixes":{ + "shape":"SuggestedFixes", + "documentation":"

    A list of SuggestedFix objects. Each object contains information about a suggested code fix to remediate the finding.

    " + } + }, + "documentation":"

    Information about how to remediate a finding.

    " + }, + "RequestHeaderMap":{ + "type":"map", + "key":{"shape":"HeaderKey"}, + "value":{"shape":"HeaderValue"}, + "sensitive":true + }, + "Resource":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

    The identifier for the resource.

    " + }, + "subResourceId":{ + "shape":"String", + "documentation":"

    The identifier for a section of the resource, such as an AWS Lambda layer.

    " + } + }, + "documentation":"

    Information about a resource, such as an Amazon S3 bucket or AWS Lambda function, that contains a finding.

    " + }, + "ResourceId":{ + "type":"structure", + "members":{ + "codeArtifactId":{ + "shape":"Uuid", + "documentation":"

    The identifier for the code file uploaded to the resource where a finding was detected.

    " + } + }, + "documentation":"

    The identifier for a resource object that contains resources where a finding was detected.

    ", + "union":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "errorCode", + "message", + "resourceId", + "resourceType" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

    The identifier for the error.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    The identifier for the resource that was not found.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of resource that was not found.

    " + } + }, + "documentation":"

    The resource specified in the request was not found.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "S3Url":{ + "type":"string", + "min":1, + "sensitive":true + }, + "ScanName":{ + "type":"string", + "max":140, + "min":1, + "pattern":"^[a-zA-Z0-9-_$:.]*$" + }, + "ScanNameArn":{ + "type":"string", + "max":300, + "min":1, + "pattern":"^arn:aws:codeguru-security:[\\S]+:[\\d]{12}:scans\\/[a-zA-Z0-9-_$:.]*$" + }, + "ScanNameWithFindingNum":{ + "type":"structure", + "members":{ + "findingNumber":{ + "shape":"Integer", + "documentation":"

    The number of open findings generated by a scan.

    " + }, + "scanName":{ + "shape":"String", + "documentation":"

    The name of the scan.

    " + } + }, + "documentation":"

    Information about a scan with open findings.

    " + }, + "ScanState":{ + "type":"string", + "enum":[ + "InProgress", + "Successful", + "Failed" + ] + }, + "ScanSummaries":{ + "type":"list", + "member":{"shape":"ScanSummary"} + }, + "ScanSummary":{ + "type":"structure", + "required":[ + "createdAt", + "runId", + "scanName", + "scanState" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The time when the scan was created.

    " + }, + "runId":{ + "shape":"Uuid", + "documentation":"

    The identifier for the scan run.

    " + }, + "scanName":{ + "shape":"ScanName", + "documentation":"

    The name of the scan.

    " + }, + "scanNameArn":{ + "shape":"ScanNameArn", + "documentation":"

    The ARN for the scan name.

    " + }, + "scanState":{ + "shape":"ScanState", + "documentation":"

    The state of the scan. A scan can be InProgress, Successful, or Failed.

    " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

    The time the scan was last updated. A scan is updated when it is re-run.

    " + } + }, + "documentation":"

    Information about a scan.

    " + }, + "ScanType":{ + "type":"string", + "enum":[ + "Standard", + "Express" + ] + }, + "ScansWithMostOpenCriticalFindings":{ + "type":"list", + "member":{"shape":"ScanNameWithFindingNum"}, + "max":3, + "min":0 + }, + "ScansWithMostOpenFindings":{ + "type":"list", + "member":{"shape":"ScanNameWithFindingNum"}, + "max":3, + "min":0 + }, + "Severity":{ + "type":"string", + "enum":[ + "Critical", + "High", + "Medium", + "Low", + "Info" + ] + }, + "Status":{ + "type":"string", + "enum":[ + "Closed", + "Open", + "All" + ] + }, + "String":{"type":"string"}, + "SuggestedFix":{ + "type":"structure", + "members":{ + "code":{ + "shape":"String", + "documentation":"

    The suggested code to add to your file.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    A description of the suggested code fix and why it is being suggested.

    " + } + }, + "documentation":"

    Information about the suggested code fix to remediate a finding.

    " + }, + "SuggestedFixes":{ + "type":"list", + "member":{"shape":"SuggestedFix"} + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ScanNameArn", + "documentation":"

    The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    An array of key-value pairs used to tag an existing scan. A tag is a custom attribute label with two parts:

    • A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive.

    • An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":[ + "errorCode", + "message" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

    The identifier for the error.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    The identifier for the originating quota.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    The identifier for the originating service.

    " + } + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ScanNameArn", + "documentation":"

    The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

    A list of keys for each tag you want to remove from a scan.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAccountConfigurationRequest":{ + "type":"structure", + "required":["encryptionConfig"], + "members":{ + "encryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    The KMS key ARN you want to use for encryption. Defaults to service-side encryption if missing.

    " + } + } + }, + "UpdateAccountConfigurationResponse":{ + "type":"structure", + "required":["encryptionConfig"], + "members":{ + "encryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

    An EncryptionConfig object that contains the KMS key ARN to use for encryption.

    " + } + } + }, + "Uuid":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "ValidationException":{ + "type":"structure", + "required":[ + "errorCode", + "message", + "reason" + ], + "members":{ + "errorCode":{ + "shape":"String", + "documentation":"

    The identifier for the error.

    " + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The field that caused the error, if applicable.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Description of the error.

    " + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason the request failed validation.

    " + } + }, + "documentation":"

    The input fails to satisfy the specified constraints.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the exception.

    " + }, + "name":{ + "shape":"String", + "documentation":"

    The name of the exception.

    " + } + }, + "documentation":"

    Information about a validation exception.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other", + "lambdaCodeShaMisMatch" + ] + }, + "Vulnerability":{ + "type":"structure", + "members":{ + "filePath":{ + "shape":"FilePath", + "documentation":"

    An object that describes the location of the detected security vulnerability in your code.

    " + }, + "id":{ + "shape":"String", + "documentation":"

    The identifier for the vulnerability.

    " + }, + "itemCount":{ + "shape":"Integer", + "documentation":"

    The number of times the vulnerability appears in your code.

    " + }, + "referenceUrls":{ + "shape":"ReferenceUrls", + "documentation":"

    One or more URL addresses that contain details about a vulnerability.

    " + }, + "relatedVulnerabilities":{ + "shape":"RelatedVulnerabilities", + "documentation":"

    One or more vulnerabilities that are related to the vulnerability being described.

    " + } + }, + "documentation":"

    Information about a security vulnerability that Amazon CodeGuru Security detected.

    " + } + }, + "documentation":"

    This section provides documentation for the Amazon CodeGuru Security API operations. CodeGuru Security is a service that uses program analysis and machine learning to detect security policy violations and vulnerabilities, and recommends ways to address these security risks.

    By proactively detecting and providing recommendations for addressing security risks, CodeGuru Security improves the overall security of your application code. For more information about CodeGuru Security, see the Amazon CodeGuru Security User Guide.

    " +} diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 13c8d72142e6..b2f2912628d2 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 58c97ea36828..8cdd48d0177a 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index 4a60bf792eb3..522b5d41c6fb 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index a73bd4e4ad65..92c0f483faa7 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index 349f6c8dff64..4a3776099078 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index e9c61325cd68..3dfab17b8d1f 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index 29a2d8ab1d8d..8f9a913166c9 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index 83ffa23368a8..66ba4ce8c526 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 1fd0390929d1..6cffe22fdc56 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index e4d61258af22..d195fd81f4e6 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/config/pom.xml b/services/config/pom.xml index fec404263666..8ff422a8a02f 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/service-2.json b/services/config/src/main/resources/codegen-resources/service-2.json index 0735bf9921fc..5b5ffb3fda3b 100644 --- 
a/services/config/src/main/resources/codegen-resources/service-2.json +++ b/services/config/src/main/resources/codegen-resources/service-2.json @@ -7456,7 +7456,28 @@ "AWS::Redshift::ScheduledAction", "AWS::Route53Resolver::FirewallRuleGroupAssociation", "AWS::SageMaker::AppImageConfig", - "AWS::SageMaker::Image" + "AWS::SageMaker::Image", + "AWS::ECS::TaskSet", + "AWS::Cassandra::Keyspace", + "AWS::Signer::SigningProfile", + "AWS::Amplify::App", + "AWS::AppMesh::VirtualNode", + "AWS::AppMesh::VirtualService", + "AWS::AppRunner::VpcConnector", + "AWS::AppStream::Application", + "AWS::CodeArtifact::Repository", + "AWS::EC2::PrefixList", + "AWS::EC2::SpotFleet", + "AWS::Evidently::Project", + "AWS::Forecast::Dataset", + "AWS::IAM::SAMLProvider", + "AWS::IAM::ServerCertificate", + "AWS::Pinpoint::Campaign", + "AWS::Pinpoint::InAppTemplate", + "AWS::SageMaker::Domain", + "AWS::Transfer::Agreement", + "AWS::Transfer::Connector", + "AWS::KinesisFirehose::DeliveryStream" ] }, "ResourceTypeList":{ diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 61fff026b813..b86eccbeff72 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/paginators-1.json b/services/connect/src/main/resources/codegen-resources/paginators-1.json index 230d2e1b39b2..e6c58c5cc27a 100644 --- a/services/connect/src/main/resources/codegen-resources/paginators-1.json +++ b/services/connect/src/main/resources/codegen-resources/paginators-1.json @@ -228,6 +228,24 @@ "output_token": "NextToken", "result_key": "AvailableNumbersList" }, + "SearchHoursOfOperations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "HoursOfOperations" + }, + "SearchPrompts": { + 
"input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "Prompts" + }, "SearchQueues": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -237,6 +255,15 @@ "output_token": "NextToken", "result_key": "Queues" }, + "SearchQuickConnects": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "QuickConnects" + }, "SearchRoutingProfiles": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index d3ce50dd21ba..4a94f31edd0e 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -2254,7 +2254,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    When a contact is being recorded, and the recording has been suspended using SuspendContactRecording, this API resumes recording the call.

    Only voice recordings are supported at this time.

    " + "documentation":"

    When a contact is being recorded, and the recording has been suspended using SuspendContactRecording, this API resumes recording the call or screen.

    Voice and screen recordings are supported.

    " }, "SearchAvailablePhoneNumbers":{ "name":"SearchAvailablePhoneNumbers", @@ -2272,6 +2272,40 @@ ], "documentation":"

    Searches for available phone numbers that you can claim to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with the traffic distribution group.

    " }, + "SearchHoursOfOperations":{ + "name":"SearchHoursOfOperations", + "http":{ + "method":"POST", + "requestUri":"/search-hours-of-operations" + }, + "input":{"shape":"SearchHoursOfOperationsRequest"}, + "output":{"shape":"SearchHoursOfOperationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Searches the hours of operation in an Amazon Connect instance, with optional filtering.

    " + }, + "SearchPrompts":{ + "name":"SearchPrompts", + "http":{ + "method":"POST", + "requestUri":"/search-prompts" + }, + "input":{"shape":"SearchPromptsRequest"}, + "output":{"shape":"SearchPromptsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Searches prompts in an Amazon Connect instance, with optional filtering.

    " + }, "SearchQueues":{ "name":"SearchQueues", "http":{ @@ -2289,6 +2323,23 @@ ], "documentation":"

    This API is in preview release for Amazon Connect and is subject to change.

    Searches queues in an Amazon Connect instance, with optional filtering.

    " }, + "SearchQuickConnects":{ + "name":"SearchQuickConnects", + "http":{ + "method":"POST", + "requestUri":"/search-quick-connects" + }, + "input":{"shape":"SearchQuickConnectsRequest"}, + "output":{"shape":"SearchQuickConnectsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

    Searches quick connects in an Amazon Connect instance, with optional filtering.

    " + }, "SearchRoutingProfiles":{ "name":"SearchRoutingProfiles", "http":{ @@ -2540,7 +2591,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    When a contact is being recorded, this API suspends recording the call. For example, you might suspend the call recording while collecting sensitive information, such as a credit card number. Then use ResumeContactRecording to restart recording.

    The period of time that the recording is suspended is filled with silence in the final recording.

    Only voice recordings are supported at this time.

    " + "documentation":"

    When a contact is being recorded, this API suspends recording the call or screen. For example, you might suspend the call or screen recording while collecting sensitive information, such as a credit card number. Then use ResumeContactRecording to restart recording.

    The period of time that the recording is suspended is filled with silence in the final recording.

    Voice and screen recordings are supported.

    " }, "TagResource":{ "name":"TagResource", @@ -8947,7 +8998,40 @@ "min":1 }, "HoursOfOperationId":{"type":"string"}, + "HoursOfOperationList":{ + "type":"list", + "member":{"shape":"HoursOfOperation"} + }, "HoursOfOperationName":{"type":"string"}, + "HoursOfOperationSearchConditionList":{ + "type":"list", + "member":{"shape":"HoursOfOperationSearchCriteria"} + }, + "HoursOfOperationSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"HoursOfOperationSearchConditionList", + "documentation":"

    A list of conditions which would be applied together with an OR condition.

    " + }, + "AndConditions":{ + "shape":"HoursOfOperationSearchConditionList", + "documentation":"

    A list of conditions which would be applied together with an AND condition.

    " + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported values for FieldName are name, description, timezone, and resourceID.

    " + } + }, + "documentation":"

    The search criteria to be used to return hours of operations.

    " + }, + "HoursOfOperationSearchFilter":{ + "type":"structure", + "members":{ + "TagFilter":{"shape":"ControlPlaneTagFilter"} + }, + "documentation":"

    Filters to be applied to search results.

    " + }, "HoursOfOperationSummary":{ "type":"structure", "members":{ @@ -9043,6 +9127,10 @@ "OutboundCallsEnabled":{ "shape":"OutboundCallsEnabled", "documentation":"

    Whether outbound calls are enabled.

    " + }, + "InstanceAccessUrl":{ + "shape":"Url", + "documentation":"

    This URL allows contact center users to access the Amazon Connect admin website.

    " } }, "documentation":"

    The Amazon Connect instance.

    " @@ -9146,7 +9234,8 @@ "AGENT_EVENTS", "REAL_TIME_CONTACT_ANALYSIS_SEGMENTS", "ATTACHMENTS", - "CONTACT_EVALUATIONS" + "CONTACT_EVALUATIONS", + "SCREEN_RECORDINGS" ] }, "InstanceSummary":{ @@ -9187,6 +9276,10 @@ "OutboundCallsEnabled":{ "shape":"OutboundCallsEnabled", "documentation":"

    Whether outbound calls are enabled.

    " + }, + "InstanceAccessUrl":{ + "shape":"Url", + "documentation":"

    This URL allows contact center users to access the Amazon Connect admin website.

    " } }, "documentation":"

    Information about the instance.

    " @@ -11818,7 +11911,7 @@ }, "Description":{ "shape":"PromptDescription", - "documentation":"

    A description for the prompt.

    " + "documentation":"

    The description of the prompt.

    " }, "Tags":{ "shape":"TagMap", @@ -11837,6 +11930,10 @@ "max":256, "min":1 }, + "PromptList":{ + "type":"list", + "member":{"shape":"Prompt"} + }, "PromptName":{ "type":"string", "max":256, @@ -11847,6 +11944,35 @@ "max":2000, "min":1 }, + "PromptSearchConditionList":{ + "type":"list", + "member":{"shape":"PromptSearchCriteria"} + }, + "PromptSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"PromptSearchConditionList", + "documentation":"

    A list of conditions which would be applied together with an OR condition.

    " + }, + "AndConditions":{ + "shape":"PromptSearchConditionList", + "documentation":"

    A list of conditions which would be applied together with an AND condition.

    " + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported values for FieldName are name, description, and resourceID.

    " + } + }, + "documentation":"

    The search criteria to be used to return prompts.

    " + }, + "PromptSearchFilter":{ + "type":"structure", + "members":{ + "TagFilter":{"shape":"ControlPlaneTagFilter"} + }, + "documentation":"

    Filters to be applied to search results.

    " + }, "PromptSummary":{ "type":"structure", "members":{ @@ -12068,7 +12194,10 @@ "shape":"QueueSearchConditionList", "documentation":"

    A list of conditions which would be applied together with an AND condition.

    " }, - "StringCondition":{"shape":"StringCondition"}, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported values for FieldName are name, description, and resourceID.

    " + }, "QueueTypeCondition":{ "shape":"SearchableQueueType", "documentation":"

    The type of queue.

    " @@ -12202,6 +12331,39 @@ "max":127, "min":1 }, + "QuickConnectSearchConditionList":{ + "type":"list", + "member":{"shape":"QuickConnectSearchCriteria"} + }, + "QuickConnectSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"QuickConnectSearchConditionList", + "documentation":"

    A list of conditions which would be applied together with an OR condition.

    " + }, + "AndConditions":{ + "shape":"QuickConnectSearchConditionList", + "documentation":"

    A list of conditions which would be applied together with an AND condition.

    " + }, + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported values for FieldName are name, description, and resourceID.

    " + } + }, + "documentation":"

    The search criteria to be used to return quick connects.

    " + }, + "QuickConnectSearchFilter":{ + "type":"structure", + "members":{ + "TagFilter":{"shape":"ControlPlaneTagFilter"} + }, + "documentation":"

    Filters to be applied to search results.

    " + }, + "QuickConnectSearchSummaryList":{ + "type":"list", + "member":{"shape":"QuickConnect"} + }, "QuickConnectSummary":{ "type":"structure", "members":{ @@ -12715,7 +12877,10 @@ "shape":"RoutingProfileSearchConditionList", "documentation":"

    A list of conditions which would be applied together with an AND condition.

    " }, - "StringCondition":{"shape":"StringCondition"} + "StringCondition":{ + "shape":"StringCondition", + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported values for FieldName are name, description, and resourceID.

    " + } }, "documentation":"

    The search criteria to be used to return routing profiles.

    The name and description fields support \"contains\" queries with a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths outside of this range will return invalid results.

    " }, @@ -13009,6 +13174,94 @@ } } }, + "SearchHoursOfOperationsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

    " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

    The maximum number of results to return per page.

    ", + "box":true + }, + "SearchFilter":{ + "shape":"HoursOfOperationSearchFilter", + "documentation":"

    Filters to be applied to search results.

    " + }, + "SearchCriteria":{ + "shape":"HoursOfOperationSearchCriteria", + "documentation":"

    The search criteria to be used to return hours of operations.

    " + } + } + }, + "SearchHoursOfOperationsResponse":{ + "type":"structure", + "members":{ + "HoursOfOperations":{ + "shape":"HoursOfOperationList", + "documentation":"

    Information about the hours of operations.

    " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

    The total number of hours of operations which matched your search query.

    " + } + } + }, + "SearchPromptsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

    " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

    The maximum number of results to return per page.

    ", + "box":true + }, + "SearchFilter":{ + "shape":"PromptSearchFilter", + "documentation":"

    Filters to be applied to search results.

    " + }, + "SearchCriteria":{ + "shape":"PromptSearchCriteria", + "documentation":"

    The search criteria to be used to return prompts.

    " + } + } + }, + "SearchPromptsResponse":{ + "type":"structure", + "members":{ + "Prompts":{ + "shape":"PromptList", + "documentation":"

    Information about the prompts.

    " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

    The total number of prompts which matched your search query.

    " + } + } + }, "SearchQueuesRequest":{ "type":"structure", "required":["InstanceId"], @@ -13053,6 +13306,50 @@ } } }, + "SearchQuickConnectsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

    " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

    The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

    " + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

    The maximum number of results to return per page.

    ", + "box":true + }, + "SearchFilter":{ + "shape":"QuickConnectSearchFilter", + "documentation":"

    Filters to be applied to search results.

    " + }, + "SearchCriteria":{ + "shape":"QuickConnectSearchCriteria", + "documentation":"

    The search criteria to be used to return quick connects.

    " + } + } + }, + "SearchQuickConnectsResponse":{ + "type":"structure", + "members":{ + "QuickConnects":{ + "shape":"QuickConnectSearchSummaryList", + "documentation":"

    Information about the quick connects.

    " + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

    If there are additional results, this is the token for the next set of results.

    " + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

    The total number of quick connects which matched your search query.

    " + } + } + }, "SearchRoutingProfilesRequest":{ "type":"structure", "required":["InstanceId"], @@ -13941,7 +14238,7 @@ "documentation":"

    The type of comparison to be made when evaluating the string condition.

    " } }, - "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported value for FieldName: name

    " + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    " }, "StringReference":{ "type":"structure", @@ -16211,7 +16508,7 @@ }, "StringCondition":{ "shape":"StringCondition", - "documentation":"

    A leaf node condition which can be used to specify a string condition.

    " + "documentation":"

    A leaf node condition which can be used to specify a string condition.

    The currently supported values for FieldName are name, description, and resourceID.

    " }, "HierarchyGroupCondition":{ "shape":"HierarchyGroupCondition", diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index 474597c43693..0129c7c9a2de 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index dc70878ef883..25e31826db09 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index a285653898fa..4587df5c5dd0 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 6d10614a1756..8c722155e61a 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index cdc140dc445a..30a26e468084 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index d51ff3b43582..2d74229353fe 100644 --- a/services/costandusagereport/pom.xml +++ 
b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index d2d8abbc93f4..1e772d3b0991 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 costexplorer diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 0d9e877b46b2..c49a4607202f 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 3f33dc3eab0f..dd019702d5ac 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 1ec2083ff0bb..aa0c1ff61d34 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 3ad8b0bcf7b6..aa453033179d 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 9791a25fcf8b..f55e9c4fff56 100644 --- 
a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index decfb9df0c3d..1be4ab301141 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/dax/pom.xml b/services/dax/pom.xml index 62131a73560b..b115f22744b1 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index 1a2ea47c0368..667b2402433d 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 86d012fdfe41..2d82fe7392b9 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 85ec77e769b4..218e4ca75f2a 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index fb9cb4d3276f..4e8ef005fb59 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directory/pom.xml b/services/directory/pom.xml index e328a09f3880..6c94dddb96e6 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 2325a700f25f..71451c5324b2 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 1383c4c8acd6..308d545f685c 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 41ce5f775873..709c1f2ca841 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/drs/pom.xml b/services/drs/pom.xml index 4f8cfeb8f477..d57ba492e039 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/drs/src/main/resources/codegen-resources/paginators-1.json b/services/drs/src/main/resources/codegen-resources/paginators-1.json index 3158e95a566f..bf7f0e978df8 100644 --- a/services/drs/src/main/resources/codegen-resources/paginators-1.json +++ b/services/drs/src/main/resources/codegen-resources/paginators-1.json @@ -36,6 +36,12 @@ "limit_key": 
"maxResults", "result_key": "items" }, + "DescribeSourceNetworks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, "DescribeSourceServers": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/drs/src/main/resources/codegen-resources/service-2.json b/services/drs/src/main/resources/codegen-resources/service-2.json index d154f217dc50..db8d44dd9fe7 100644 --- a/services/drs/src/main/resources/codegen-resources/service-2.json +++ b/services/drs/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,26 @@ "uid":"drs-2020-02-26" }, "operations":{ + "AssociateSourceNetworkStack":{ + "name":"AssociateSourceNetworkStack", + "http":{ + "method":"POST", + "requestUri":"/AssociateSourceNetworkStack", + "responseCode":202 + }, + "input":{"shape":"AssociateSourceNetworkStackRequest"}, + "output":{"shape":"AssociateSourceNetworkStackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Associate a Source Network to an existing CloudFormation Stack and modify launch templates to use this network. Can be used for reverting to previously deployed CloudFormation stacks.

    " + }, "CreateExtendedSourceServer":{ "name":"CreateExtendedSourceServer", "http":{ @@ -71,6 +91,26 @@ ], "documentation":"

    Creates a new ReplicationConfigurationTemplate.

    " }, + "CreateSourceNetwork":{ + "name":"CreateSourceNetwork", + "http":{ + "method":"POST", + "requestUri":"/CreateSourceNetwork", + "responseCode":201 + }, + "input":{"shape":"CreateSourceNetworkRequest"}, + "output":{"shape":"CreateSourceNetworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Create a new Source Network resource for a provided VPC ID.

    " + }, "DeleteJob":{ "name":"DeleteJob", "http":{ @@ -145,6 +185,25 @@ "documentation":"

    Deletes a single Replication Configuration Template by ID

    ", "idempotent":true }, + "DeleteSourceNetwork":{ + "name":"DeleteSourceNetwork", + "http":{ + "method":"POST", + "requestUri":"/DeleteSourceNetwork", + "responseCode":204 + }, + "input":{"shape":"DeleteSourceNetworkRequest"}, + "output":{"shape":"DeleteSourceNetworkResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Delete Source Network resource.

    ", + "idempotent":true + }, "DeleteSourceServer":{ "name":"DeleteSourceServer", "http":{ @@ -269,6 +328,23 @@ ], "documentation":"

    Lists all ReplicationConfigurationTemplates, filtered by Source Server IDs.

    " }, + "DescribeSourceNetworks":{ + "name":"DescribeSourceNetworks", + "http":{ + "method":"POST", + "requestUri":"/DescribeSourceNetworks", + "responseCode":200 + }, + "input":{"shape":"DescribeSourceNetworksRequest"}, + "output":{"shape":"DescribeSourceNetworksResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Lists all Source Networks or multiple Source Networks filtered by ID.

    " + }, "DescribeSourceServers":{ "name":"DescribeSourceServers", "http":{ @@ -322,6 +398,25 @@ ], "documentation":"

    Disconnects a specific Source Server from Elastic Disaster Recovery. Data replication is stopped immediately. All AWS resources created by Elastic Disaster Recovery for enabling the replication of the Source Server will be terminated / deleted within 90 minutes. You cannot disconnect a Source Server if it has a Recovery Instance. If the agent on the Source Server has not been prevented from communicating with the Elastic Disaster Recovery service, then it will receive a command to uninstall itself (within approximately 10 minutes). The following properties of the SourceServer will be changed immediately: dataReplicationInfo.dataReplicationState will be set to DISCONNECTED; The totalStorageBytes property for each of dataReplicationInfo.replicatedDisks will be set to zero; dataReplicationInfo.lagDuration and dataReplicationInfo.etaDateTime will be nullified.

    " }, + "ExportSourceNetworkCfnTemplate":{ + "name":"ExportSourceNetworkCfnTemplate", + "http":{ + "method":"POST", + "requestUri":"/ExportSourceNetworkCfnTemplate", + "responseCode":200 + }, + "input":{"shape":"ExportSourceNetworkCfnTemplateRequest"}, + "output":{"shape":"ExportSourceNetworkCfnTemplateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Export the Source Network CloudFormation template to an S3 bucket.

    " + }, "GetFailbackReplicationConfiguration":{ "name":"GetFailbackReplicationConfiguration", "http":{ @@ -540,6 +635,43 @@ ], "documentation":"

    Starts replication for a stopped Source Server. This action would make the Source Server protected again and restart billing for it.

    " }, + "StartSourceNetworkRecovery":{ + "name":"StartSourceNetworkRecovery", + "http":{ + "method":"POST", + "requestUri":"/StartSourceNetworkRecovery", + "responseCode":202 + }, + "input":{"shape":"StartSourceNetworkRecoveryRequest"}, + "output":{"shape":"StartSourceNetworkRecoveryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Deploy VPC for the specified Source Network and modify launch templates to use this network. The VPC will be deployed using a dedicated CloudFormation stack.

    " + }, + "StartSourceNetworkReplication":{ + "name":"StartSourceNetworkReplication", + "http":{ + "method":"POST", + "requestUri":"/StartSourceNetworkReplication", + "responseCode":200 + }, + "input":{"shape":"StartSourceNetworkReplicationRequest"}, + "output":{"shape":"StartSourceNetworkReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Starts replication for a Source Network. This action would make the Source Network protected.

    " + }, "StopFailback":{ "name":"StopFailback", "http":{ @@ -574,6 +706,25 @@ ], "documentation":"

    Stops replication for a Source Server. This action would make the Source Server unprotected, delete its existing snapshots and stop billing for it.

    " }, + "StopSourceNetworkReplication":{ + "name":"StopSourceNetworkReplication", + "http":{ + "method":"POST", + "requestUri":"/StopSourceNetworkReplication", + "responseCode":200 + }, + "input":{"shape":"StopSourceNetworkReplicationRequest"}, + "output":{"shape":"StopSourceNetworkReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UninitializedAccountException"} + ], + "documentation":"

    Stops replication for a Source Network. This action would make the Source Network unprotected.

    " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -773,6 +924,32 @@ "max":50, "min":0 }, + "AssociateSourceNetworkStackRequest":{ + "type":"structure", + "required":[ + "cfnStackName", + "sourceNetworkID" + ], + "members":{ + "cfnStackName":{ + "shape":"CfnStackName", + "documentation":"

    CloudFormation template to associate with a Source Network.

    " + }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    The Source Network ID to associate with CloudFormation template.

    " + } + } + }, + "AssociateSourceNetworkStackResponse":{ + "type":"structure", + "members":{ + "job":{ + "shape":"Job", + "documentation":"

    The Source Network association Job.

    " + } + } + }, "AwsAvailabilityZone":{ "type":"string", "max":255, @@ -808,6 +985,13 @@ }, "documentation":"

    Information about a server's CPU.

    " }, + "CfnStackName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z][-a-zA-Z0-9]*$", + "sensitive":true + }, "ConflictException":{ "type":"structure", "members":{ @@ -900,6 +1084,10 @@ "shape":"Boolean", "documentation":"

    Copy tags.

    " }, + "exportBucketArn":{ + "shape":"ARN", + "documentation":"

    S3 bucket ARN to export Source Network templates.

    " + }, "launchDisposition":{ "shape":"LaunchDisposition", "documentation":"

    Launch disposition.

    " @@ -1006,6 +1194,41 @@ } } }, + "CreateSourceNetworkRequest":{ + "type":"structure", + "required":[ + "originAccountID", + "originRegion", + "vpcID" + ], + "members":{ + "originAccountID":{ + "shape":"AccountID", + "documentation":"

    Account containing the VPC to protect.

    " + }, + "originRegion":{ + "shape":"AwsRegion", + "documentation":"

    Region containing the VPC to protect.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    A set of tags to be associated with the Source Network resource.

    " + }, + "vpcID":{ + "shape":"VpcID", + "documentation":"

    Which VPC ID to protect.

    " + } + } + }, + "CreateSourceNetworkResponse":{ + "type":"structure", + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    ID of the created Source Network.

    " + } + } + }, "DataReplicationError":{ "type":"structure", "members":{ @@ -1237,6 +1460,21 @@ "members":{ } }, + "DeleteSourceNetworkRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    ID of the Source Network to delete.

    " + } + } + }, + "DeleteSourceNetworkResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteSourceServerRequest":{ "type":"structure", "required":["sourceServerID"], @@ -1498,6 +1736,60 @@ } } }, + "DescribeSourceNetworksRequest":{ + "type":"structure", + "members":{ + "filters":{ + "shape":"DescribeSourceNetworksRequestFilters", + "documentation":"

    A set of filters by which to return Source Networks.

    " + }, + "maxResults":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

    Maximum number of Source Networks to retrieve.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token of the next Source Networks to retrieve.

    " + } + } + }, + "DescribeSourceNetworksRequestFilters":{ + "type":"structure", + "members":{ + "originAccountID":{ + "shape":"AccountID", + "documentation":"

    Filter Source Networks by account ID containing the protected VPCs.

    " + }, + "originRegion":{ + "shape":"AwsRegion", + "documentation":"

    Filter Source Networks by the region containing the protected VPCs.

    " + }, + "sourceNetworkIDs":{ + "shape":"DescribeSourceNetworksRequestFiltersIDs", + "documentation":"

    An array of Source Network IDs that should be returned. An empty array means all Source Networks.

    " + } + }, + "documentation":"

    A set of filters by which to return Source Networks.

    " + }, + "DescribeSourceNetworksRequestFiltersIDs":{ + "type":"list", + "member":{"shape":"SourceNetworkID"}, + "max":100, + "min":0 + }, + "DescribeSourceNetworksResponse":{ + "type":"structure", + "members":{ + "items":{ + "shape":"SourceNetworksList", + "documentation":"

    An array of Source Networks.

    " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    The token of the next Source Networks to retrieve.

    " + } + } + }, "DescribeSourceServersRequest":{ "type":"structure", "members":{ @@ -1629,6 +1921,36 @@ "min":10, "pattern":"^vol-([0-9a-fA-F]{8}|[0-9a-fA-F]{17})$" }, + "EventResourceData":{ + "type":"structure", + "members":{ + "sourceNetworkData":{ + "shape":"SourceNetworkData", + "documentation":"

    Source Network properties.

    " + } + }, + "documentation":"

    Properties of resource related to a job event.

    ", + "union":true + }, + "ExportSourceNetworkCfnTemplateRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    The Source Network ID to export its CloudFormation template to an S3 bucket.

    " + } + } + }, + "ExportSourceNetworkCfnTemplateResponse":{ + "type":"structure", + "members":{ + "s3DestinationUrl":{ + "shape":"LargeBoundedString", + "documentation":"

    S3 bucket URL where the Source Network CloudFormation template was exported to.

    " + } + } + }, "ExtensionStatus":{ "type":"string", "enum":[ @@ -1789,7 +2111,10 @@ "FAILBACK", "DIAGNOSTIC", "TERMINATE_RECOVERY_INSTANCES", - "TARGET_ACCOUNT" + "TARGET_ACCOUNT", + "CREATE_NETWORK_RECOVERY", + "UPDATE_NETWORK_RECOVERY", + "ASSOCIATE_NETWORK_RECOVERY" ] }, "InternalServerException":{ @@ -1833,6 +2158,10 @@ "shape":"JobID", "documentation":"

    The ID of the Job.

    " }, + "participatingResources":{ + "shape":"ParticipatingResources", + "documentation":"

    A list of resources that the Job is acting upon.

    " + }, "participatingServers":{ "shape":"ParticipatingServers", "documentation":"

    A list of servers that the Job is acting upon.

    " @@ -1895,7 +2224,17 @@ "LAUNCH_START", "LAUNCH_FAILED", "JOB_CANCEL", - "JOB_END" + "JOB_END", + "DEPLOY_NETWORK_CONFIGURATION_START", + "DEPLOY_NETWORK_CONFIGURATION_END", + "DEPLOY_NETWORK_CONFIGURATION_FAILED", + "UPDATE_NETWORK_CONFIGURATION_START", + "UPDATE_NETWORK_CONFIGURATION_END", + "UPDATE_NETWORK_CONFIGURATION_FAILED", + "UPDATE_LAUNCH_TEMPLATE_START", + "UPDATE_LAUNCH_TEMPLATE_END", + "UPDATE_LAUNCH_TEMPLATE_FAILED", + "NETWORK_RECOVERY_FAIL" ] }, "JobLogEventData":{ @@ -1909,6 +2248,10 @@ "shape":"EC2InstanceID", "documentation":"

    The ID of a conversion server.

    " }, + "eventResourceData":{ + "shape":"EventResourceData", + "documentation":"

    Properties of resource related to a job event.

    " + }, "rawError":{ "shape":"LargeBoundedString", "documentation":"

    A string representing a job error.

    " @@ -2021,6 +2364,10 @@ "shape":"Boolean", "documentation":"

    Copy tags.

    " }, + "exportBucketArn":{ + "shape":"ARN", + "documentation":"

    S3 bucket ARN to export Source Network templates.

    " + }, "launchConfigurationTemplateID":{ "shape":"LaunchConfigurationTemplateID", "documentation":"

    ID of the Launch Configuration Template.

    " @@ -2336,6 +2683,35 @@ "max":2048, "min":0 }, + "ParticipatingResource":{ + "type":"structure", + "members":{ + "launchStatus":{ + "shape":"LaunchStatus", + "documentation":"

    The launch status of a participating resource.

    " + }, + "participatingResourceID":{ + "shape":"ParticipatingResourceID", + "documentation":"

    The ID of a participating resource.

    " + } + }, + "documentation":"

    Represents a resource participating in an asynchronous Job.

    " + }, + "ParticipatingResourceID":{ + "type":"structure", + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    Source Network ID.

    " + } + }, + "documentation":"

    ID of a resource participating in an asynchronous Job.

    ", + "union":true + }, + "ParticipatingResources":{ + "type":"list", + "member":{"shape":"ParticipatingResource"} + }, "ParticipatingServer":{ "type":"structure", "members":{ @@ -2708,6 +3084,36 @@ "max":200, "min":1 }, + "RecoveryLifeCycle":{ + "type":"structure", + "members":{ + "apiCallDateTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    The date and time the last Source Network recovery was initiated.

    " + }, + "jobID":{ + "shape":"JobID", + "documentation":"

    The ID of the Job that was used to last recover the Source Network.

    " + }, + "lastRecoveryResult":{ + "shape":"RecoveryResult", + "documentation":"

    The status of the last recovery of this Source Network.

    " + } + }, + "documentation":"

    An object representing the Source Network recovery Lifecycle.

    " + }, + "RecoveryResult":{ + "type":"string", + "enum":[ + "NOT_STARTED", + "IN_PROGRESS", + "SUCCESS", + "FAIL", + "PARTIAL_SUCCESS", + "ASSOCIATE_SUCCESS", + "ASSOCIATE_FAIL" + ] + }, "RecoverySnapshot":{ "type":"structure", "required":[ @@ -2849,7 +3255,8 @@ "type":"string", "enum":[ "DEFAULT", - "CUSTOM" + "CUSTOM", + "NONE" ] }, "ReplicationConfigurationReplicatedDisk":{ @@ -3004,6 +3411,15 @@ "max":32, "min":0 }, + "ReplicationStatus":{ + "type":"string", + "enum":[ + "STOPPED", + "IN_PROGRESS", + "PROTECTED", + "ERROR" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -3062,6 +3478,12 @@ "min":0, "pattern":"^sg-[0-9a-fA-F]{8,}$" }, + "SensitiveBoundedString":{ + "type":"string", + "max":256, + "min":0, + "sensitive":true + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -3114,6 +3536,88 @@ }, "documentation":"

    Properties of the cloud environment where this Source Server originated from.

    " }, + "SourceNetwork":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ARN", + "documentation":"

    The ARN of the Source Network.

    " + }, + "cfnStackName":{ + "shape":"CfnStackName", + "documentation":"

    CloudFormation stack name that was deployed for recovering the Source Network.

    " + }, + "lastRecovery":{ + "shape":"RecoveryLifeCycle", + "documentation":"

    An object containing information regarding the last recovery of the Source Network.

    " + }, + "launchedVpcID":{ + "shape":"VpcID", + "documentation":"

    ID of the recovered VPC following Source Network recovery.

    " + }, + "replicationStatus":{ + "shape":"ReplicationStatus", + "documentation":"

    Status of Source Network Replication. Possible values: (a) STOPPED - Source Network is not replicating. (b) IN_PROGRESS - Source Network is being replicated. (c) PROTECTED - Source Network was replicated successfully and is being synchronized for changes. (d) ERROR - Source Network replication has failed

    " + }, + "replicationStatusDetails":{ + "shape":"SensitiveBoundedString", + "documentation":"

    Error details in case Source Network replication status is ERROR.

    " + }, + "sourceAccountID":{ + "shape":"AccountID", + "documentation":"

    Account ID containing the VPC protected by the Source Network.

    " + }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    Source Network ID.

    " + }, + "sourceRegion":{ + "shape":"AwsRegion", + "documentation":"

    Region containing the VPC protected by the Source Network.

    " + }, + "sourceVpcID":{ + "shape":"VpcID", + "documentation":"

    VPC ID protected by the Source Network.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    A list of tags associated with the Source Network.

    " + } + }, + "documentation":"

    Represents a Source Network resource, which tracks the replication and recovery state of a protected VPC.

    " + }, + "SourceNetworkData":{ + "type":"structure", + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    Source Network ID.

    " + }, + "sourceVpc":{ + "shape":"VpcID", + "documentation":"

    VPC ID protected by the Source Network.

    " + }, + "stackName":{ + "shape":"LargeBoundedString", + "documentation":"

    CloudFormation stack name that was deployed for recovering the Source Network.

    " + }, + "targetVpc":{ + "shape":"VpcID", + "documentation":"

    ID of the recovered VPC following Source Network recovery.

    " + } + }, + "documentation":"

    Properties of Source Network related to a job event.

    " + }, + "SourceNetworkID":{ + "type":"string", + "max":20, + "min":20, + "pattern":"^sn-[0-9a-zA-Z]{17}$" + }, + "SourceNetworksList":{ + "type":"list", + "member":{"shape":"SourceNetwork"} + }, "SourceProperties":{ "type":"structure", "members":{ @@ -3191,6 +3695,10 @@ "shape":"SourceCloudProperties", "documentation":"

    Source cloud properties of the Source Server.

    " }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    ID of the Source Network which is protecting this Source Server's network.

    " + }, "sourceProperties":{ "shape":"SourceProperties", "documentation":"

    The source properties of the Source Server.

    " @@ -3369,6 +3877,73 @@ } } }, + "StartSourceNetworkRecoveryRequest":{ + "type":"structure", + "required":["sourceNetworks"], + "members":{ + "deployAsNew":{ + "shape":"Boolean", + "documentation":"

    Don't update the existing CloudFormation stack; instead, recover the network using a new stack.

    " + }, + "sourceNetworks":{ + "shape":"StartSourceNetworkRecoveryRequestNetworkEntries", + "documentation":"

    The Source Networks that we want to start a Recovery Job for.

    " + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

    The tags to be associated with the Source Network recovery Job.

    " + } + } + }, + "StartSourceNetworkRecoveryRequestNetworkEntries":{ + "type":"list", + "member":{"shape":"StartSourceNetworkRecoveryRequestNetworkEntry"}, + "max":100, + "min":1 + }, + "StartSourceNetworkRecoveryRequestNetworkEntry":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "cfnStackName":{ + "shape":"CfnStackName", + "documentation":"

    CloudFormation stack name to be used for recovering the network.

    " + }, + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    The ID of the Source Network you want to recover.

    " + } + }, + "documentation":"

    An object representing the Source Network to recover.

    " + }, + "StartSourceNetworkRecoveryResponse":{ + "type":"structure", + "members":{ + "job":{ + "shape":"Job", + "documentation":"

    The Source Network recovery Job.

    " + } + } + }, + "StartSourceNetworkReplicationRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    ID of the Source Network to replicate.

    " + } + } + }, + "StartSourceNetworkReplicationResponse":{ + "type":"structure", + "members":{ + "sourceNetwork":{ + "shape":"SourceNetwork", + "documentation":"

    Source Network which was requested for replication.

    " + } + } + }, "StopFailbackRequest":{ "type":"structure", "required":["recoveryInstanceID"], @@ -3398,6 +3973,25 @@ } } }, + "StopSourceNetworkReplicationRequest":{ + "type":"structure", + "required":["sourceNetworkID"], + "members":{ + "sourceNetworkID":{ + "shape":"SourceNetworkID", + "documentation":"

    ID of the Source Network to stop replication.

    " + } + } + }, + "StopSourceNetworkReplicationResponse":{ + "type":"structure", + "members":{ + "sourceNetwork":{ + "shape":"SourceNetwork", + "documentation":"

    Source Network which was requested to stop replication.

    " + } + } + }, "StrictlyPositiveInteger":{ "type":"integer", "min":1 @@ -3408,6 +4002,10 @@ "min":0, "pattern":"^subnet-[0-9a-fA-F]{8,}$" }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "TagKey":{ "type":"string", "max":256, @@ -3603,6 +4201,10 @@ "shape":"Boolean", "documentation":"

    Copy tags.

    " }, + "exportBucketArn":{ + "shape":"ARN", + "documentation":"

    S3 bucket ARN to export Source Network templates.

    " + }, "launchConfigurationTemplateID":{ "shape":"LaunchConfigurationTemplateID", "documentation":"

    Launch Configuration Template ID.

    " @@ -3831,6 +4433,12 @@ "type":"map", "key":{"shape":"LargeBoundedString"}, "value":{"shape":"PositiveInteger"} + }, + "VpcID":{ + "type":"string", + "max":21, + "min":12, + "pattern":"^vpc-[0-9a-fA-F]{8,}$" } }, "documentation":"

    AWS Elastic Disaster Recovery Service.

    " diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index d8db951ccf36..0f99e88377fc 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index 6ced961e524a..503507b44955 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -41,7 +41,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

    A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

    If you request more than 100 items, BatchGetItem returns a ValidationException with the message \"Too many items requested for the BatchGetItem call.\"

    For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset.

    If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

    In order to minimize response latency, BatchGetItem may retrieve items in parallel.

    When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

    If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.

    ", + "documentation":"

    The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

    A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

    If you request more than 100 items, BatchGetItem returns a ValidationException with the message \"Too many items requested for the BatchGetItem call.\"

    For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset.

    If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

    In order to minimize response latency, BatchGetItem may retrieve items in parallel.

    When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.

    If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.

    ", "endpointdiscovery":{ } }, @@ -3725,7 +3725,7 @@ "documentation":"

    Too many operations for a given subscriber.

    " } }, - "documentation":"

    There is no limit to the number of daily on-demand backups that can be taken.

    For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

    When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

    When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

    There is a soft account quota of 2,500 tables.

    ", + "documentation":"

    There is no limit to the number of daily on-demand backups that can be taken.

    For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

    When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

    When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

    There is a soft account quota of 2,500 tables.

    GetRecords was called with a value of more than 1000 for the limit request parameter.

    More than 2 processes are reading from the same streams shard at the same time. Exceeding this limit may result in request throttling.

    ", "exception":true }, "ListAttributeValue":{ @@ -4214,11 +4214,11 @@ "members":{ "ReadCapacityUnits":{ "shape":"PositiveLongObject", - "documentation":"

    The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

    " + "documentation":"

    The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

    " }, "WriteCapacityUnits":{ "shape":"PositiveLongObject", - "documentation":"

    The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

    " + "documentation":"

    The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide.

    If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.

    " } }, "documentation":"

    Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation.

    For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.

    " diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json index d086a70a8612..911bf62628e8 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + 
{ + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,94 +111,321 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + 
"rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": 
[], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "local" + ] + } + ], + "endpoint": { + "url": "http://localhost:8000", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-cn", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { @@ -238,125 +445,81 @@ } ], "endpoint": { - "url": "https://streams.dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://streams.dynamodb.{Region}.amazonaws.com", "properties": {}, "headers": {} }, "type": "endpoint" }, { - "conditions": [], + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-iso", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], "endpoint": { - "url": "https://streams.dynamodb-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://streams.dynamodb.{Region}.c2s.ic.gov", "properties": {}, "headers": {} }, "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - 
{ - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", - "argv": [ + "conditions": [ { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "fn": "stringEquals", + "argv": [ + "aws-iso-b", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.sc2s.sgov.gov", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://streams.dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://streams.dynamodb.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "local" - ] - } - ], - "endpoint": { - "url": "http://localhost:8000", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "dynamodb" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://streams.dynamodb.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json 
index d24464c47857..8fa93e555fbe 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/endpoint-tests.json @@ -1,1028 +1,31 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-1.api.aws" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region 
ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-1.api.aws" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-1.api.aws" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-1.api.aws" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-2.api.aws" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-2.api.aws" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-south-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-central-1.api.aws" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-central-1.api.aws" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-central-1.amazonaws.com" - } - }, - "params": { - "Region": "me-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ca-central-1.api.aws" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ca-central-1.api.aws" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region 
us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-central-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region 
us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-2.api.aws" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-2.api.aws" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-north-1.api.aws" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-north-1.api.aws" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://streams.dynamodb.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-3.api.aws" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-3.api.aws" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-3.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-2.api.aws" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-2.api.aws" - } - 
}, - "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-1.api.aws" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-1.api.aws" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - 
"Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-3.api.aws" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-3.amazonaws.com" + "url": "https://streams.dynamodb.af-south-1.amazonaws.com" } }, "params": { - "Region": "ap-northeast-3", + "Region": "af-south-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-2.api.aws" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-2.amazonaws.com" + "url": 
"https://streams.dynamodb.ap-east-1.amazonaws.com" } }, "params": { - "Region": "ap-northeast-2", + "Region": "ap-east-1", "UseFIPS": false, "UseDualStack": false } }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-northeast-1.api.aws" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": true - } - }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -1037,673 +40,513 @@ } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-south-1.api.aws" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-south-1.api.aws" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": 
true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.sa-east-1.api.aws" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.sa-east-1.api.aws" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.sa-east-1.amazonaws.com" + "url": "https://streams.dynamodb.ap-northeast-2.amazonaws.com" } }, "params": { - "Region": "sa-east-1", + "Region": "ap-northeast-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.ap-east-1.api.aws" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://streams.dynamodb-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb.ap-east-1.api.aws" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-east-1.amazonaws.com" + "url": "https://streams.dynamodb.ap-northeast-3.amazonaws.com" } }, "params": { - "Region": "ap-east-1", + "Region": "ap-northeast-3", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://streams.dynamodb.ap-south-1.amazonaws.com" } }, "params": { - "Region": "cn-north-1", + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS 
disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.cn-north-1.amazonaws.com.cn" + "url": "https://streams.dynamodb.ap-southeast-1.amazonaws.com" } }, "params": { - "Region": "cn-north-1", + "Region": "ap-southeast-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-gov-west-1.api.aws" + "url": "https://streams.dynamodb.ap-southeast-2.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" + "url": "https://streams.dynamodb.ap-southeast-3.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, + "Region": "ap-southeast-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-gov-west-1.api.aws" + "url": "https://streams.dynamodb.ca-central-1.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled 
and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" + "url": "https://streams.dynamodb.eu-central-1.amazonaws.com" } }, "params": { - "Region": "us-gov-west-1", + "Region": "eu-central-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-1.api.aws" + "url": "https://streams.dynamodb.eu-north-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-1.amazonaws.com" + "url": "https://streams.dynamodb.eu-south-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", - "UseFIPS": true, + "Region": "eu-south-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-1.api.aws" + "url": "https://streams.dynamodb.eu-west-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-1.amazonaws.com" + 
"url": "https://streams.dynamodb.eu-west-2.amazonaws.com" } }, "params": { - "Region": "ap-southeast-1", + "Region": "eu-west-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-2.api.aws" + "url": "https://streams.dynamodb.eu-west-3.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", - "UseFIPS": true, - "UseDualStack": true + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region local with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-2.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://localhost:8000" } }, "params": { - "Region": "ap-southeast-2", - "UseFIPS": true, + "Region": "local", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-2.api.aws" + "url": "https://streams.dynamodb.me-south-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", + "Region": "me-south-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-2.amazonaws.com" + "url": 
"https://streams.dynamodb.sa-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-2", + "Region": "sa-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://streams.dynamodb.us-east-1.amazonaws.com" + } }, "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://streams.dynamodb.us-east-2.amazonaws.com" } }, "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, + "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://streams.dynamodb.us-west-1.amazonaws.com" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-west-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-iso-east-1.c2s.ic.gov" + "url": "https://streams.dynamodb.us-west-2.amazonaws.com" } }, "params": { - "Region": 
"us-iso-east-1", + "Region": "us-west-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-3.api.aws" + "url": "https://streams.dynamodb-fips.us-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-3", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-3.amazonaws.com" + "url": "https://streams.dynamodb-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "ap-southeast-3", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-3.api.aws" + "url": "https://streams.dynamodb.us-east-1.api.aws" } }, "params": { - "Region": "ap-southeast-3", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-3.amazonaws.com" + "url": "https://streams.dynamodb.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-3", + "Region": "cn-north-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://streams.dynamodb-fips.ap-southeast-4.api.aws" - } - }, - "params": { - "Region": "ap-southeast-4", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.ap-southeast-4.amazonaws.com" + "url": "https://streams.dynamodb.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseFIPS": true, + "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-4.api.aws" + "url": "https://streams.dynamodb-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseFIPS": false, + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.ap-southeast-4.amazonaws.com" + "url": "https://streams.dynamodb-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "ap-southeast-4", - "UseFIPS": false, + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-1.api.aws" + "url": "https://streams.dynamodb.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + 
"Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-1.amazonaws.com" + "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-1.api.aws" + "url": "https://streams.dynamodb.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-1.amazonaws.com" + "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-2.api.aws" + "url": "https://streams.dynamodb.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-2", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack 
disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb-fips.us-east-2.amazonaws.com" + "url": "https://streams.dynamodb-fips.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-2", + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-2.api.aws" + "url": "https://streams.dynamodb.us-gov-east-1.api.aws" } }, "params": { - "Region": "us-east-2", + "Region": "us-gov-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-east-2.amazonaws.com" + "url": "https://streams.dynamodb.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-2", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://streams.dynamodb-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://streams.dynamodb-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://streams.dynamodb.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "cn-northwest-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.cn-northwest-1.amazonaws.com.cn" + "url": "https://streams.dynamodb.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-northwest-1", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": false } @@ -1744,27 +587,27 @@ } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://streams.dynamodb.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "Region": 
"us-east-1", "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" @@ -1793,6 +636,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json index 9b65e8fcf831..098679799516 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodbstreams/service-2.json @@ -314,7 +314,7 @@ "documentation":"

    Too many operations for a given subscriber.

    " } }, - "documentation":"

    There is no limit to the number of daily on-demand backups that can be taken.

    For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

    When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

    When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

    There is a soft account quota of 2,500 tables.

    ", + "documentation":"

    There is no limit to the number of daily on-demand backups that can be taken.

    For most purposes, up to 500 simultaneous table operations are allowed per account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, and RestoreTableToPointInTime.

    When you are creating a table with one or more secondary indexes, you can have up to 250 such requests running at a time. However, if the table or index specifications are complex, then DynamoDB might temporarily reduce the number of concurrent operations.

    When importing into DynamoDB, up to 50 simultaneous import table operations are allowed per account.

    There is a soft account quota of 2,500 tables.

    GetRecords was called with a value of more than 1000 for the limit request parameter.

    More than 2 processes are reading from the same streams shard at the same time. Exceeding this limit may result in request throttling.

    ", "exception":true }, "ListAttributeValue":{ @@ -397,7 +397,7 @@ }, "eventSource":{ "shape":"String", - "documentation":"

    The AWS service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

    " + "documentation":"

    The Amazon Web Services service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

    " }, "awsRegion":{ "shape":"String", @@ -502,7 +502,7 @@ }, "StreamLabel":{ "shape":"String", - "documentation":"

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID.

    • the table name

    • the StreamLabel

    " + "documentation":"

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the Amazon Web Services customer ID.

    • the table name

    • the StreamLabel

    " } }, "documentation":"

    Represents all of the data describing a particular stream.

    " @@ -521,7 +521,7 @@ }, "StreamLabel":{ "shape":"String", - "documentation":"

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the AWS customer ID.

    • the table name

    • the StreamLabel

    " + "documentation":"

    A timestamp, in ISO 8601 format, for this stream.

    Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

    • the Amazon Web Services customer ID.

    • the table name

    • the StreamLabel

    " }, "StreamStatus":{ "shape":"StreamStatus", @@ -563,7 +563,7 @@ "members":{ "ApproximateCreationDateTime":{ "shape":"Date", - "documentation":"

    The approximate date and time when the stream record was created, in UNIX epoch time format.

    " + "documentation":"

    The approximate date and time when the stream record was created, in UNIX epoch time format and rounded down to the closest second.

    " }, "Keys":{ "shape":"AttributeMap", diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index a9b16516146a..bee536adb4e0 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index cd89c04f700b..57ca6f02b955 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/paginators-1.json b/services/ec2/src/main/resources/codegen-resources/paginators-1.json index c2d22676e342..7d3eebedeb8a 100644 --- a/services/ec2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ec2/src/main/resources/codegen-resources/paginators-1.json @@ -195,6 +195,12 @@ "output_token": "NextToken", "result_key": "ImportSnapshotTasks" }, + "DescribeInstanceConnectEndpoints": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstanceConnectEndpoints" + }, "DescribeInstanceCreditSpecifications": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 83eb0238bc50..e3c1e8bb59ac 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -100,7 +100,7 @@ }, "input":{"shape":"AllocateAddressRequest"}, "output":{"shape":"AllocateAddressResult"}, - "documentation":"

    Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

    You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

    [EC2-VPC] If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. You cannot recover an Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP address that you released, specify it in this operation.

    An Elastic IP address is for use either in the EC2-Classic platform or in a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic per Region and 5 Elastic IP addresses for EC2-VPC per Region.

    For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account.

    You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

    If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify it in this operation.

    For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

    " }, "AllocateHosts":{ "name":"AllocateHosts", @@ -120,7 +120,7 @@ }, "input":{"shape":"AllocateIpamPoolCidrRequest"}, "output":{"shape":"AllocateIpamPoolCidrResult"}, - "documentation":"

    Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another IPAM pool or to a resource. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.

    This action creates an allocation with strong consistency. The returned CIDR will not overlap with any other allocations from the same pool.

    " + "documentation":"

    Allocate a CIDR from an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

    In IPAM, an allocation is a CIDR assignment from an IPAM pool to another IPAM pool or to a resource. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.

    This action creates an allocation with strong consistency. The returned CIDR will not overlap with any other allocations from the same pool.

    " }, "ApplySecurityGroupsToClientVpnTargetNetwork":{ "name":"ApplySecurityGroupsToClientVpnTargetNetwork", @@ -170,7 +170,7 @@ }, "input":{"shape":"AssociateAddressRequest"}, "output":{"shape":"AssociateAddressResult"}, - "documentation":"

    Associates an Elastic IP address, or carrier IP address (for instances that are in subnets in Wavelength Zones) with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

    [VPC in an EC2-Classic account] If you don't specify a private IP address, the Elastic IP address is associated with the primary IP address. If the Elastic IP address is already associated with a different instance or a network interface, you get an error unless you allow reassociation. You cannot associate an Elastic IP address with an instance or network interface that has an existing Elastic IP address.

    [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication carrier to the instance or network interface.

    You cannot associate an Elastic IP address with an interface in a different network border group.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    Associates an Elastic IP address, or carrier IP address (for instances that are in subnets in Wavelength Zones) with an instance or a network interface. Before you can use an Elastic IP address, you must allocate it to your account.

    If the Elastic IP address is already associated with a different instance, it is disassociated from that instance and associated with the specified instance. If you associate an Elastic IP address with an instance that has an existing Elastic IP address, the existing address is disassociated from the instance, but remains allocated to your account.

    [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication carrier to the instance or network interface.

    You cannot associate an Elastic IP address with an interface in a different network border group.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error, and you may be charged for each time the Elastic IP address is remapped to the same instance. For more information, see the Elastic IP Addresses section of Amazon EC2 Pricing.

    " }, "AssociateClientVpnTargetNetwork":{ "name":"AssociateClientVpnTargetNetwork", @@ -676,7 +676,7 @@ }, "input":{"shape":"CreateFleetRequest"}, "output":{"shape":"CreateFleetResult"}, - "documentation":"

    Launches an EC2 Fleet.

    You can create a single EC2 Fleet that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    For more information, see EC2 Fleet in the Amazon EC2 User Guide.

    " + "documentation":"

    Creates an EC2 Fleet that contains the configuration information for On-Demand Instances and Spot Instances. Instances are launched immediately if there is available capacity.

    A single EC2 Fleet can include multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

    For more information, see EC2 Fleet in the Amazon EC2 User Guide.

    " }, "CreateFlowLogs":{ "name":"CreateFlowLogs", @@ -708,6 +708,16 @@ "output":{"shape":"CreateImageResult"}, "documentation":"

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    By default, when Amazon EC2 creates the new AMI, it reboots the instance so that it can take snapshots of the attached volumes while data is at rest, in order to ensure a consistent state. You can set the NoReboot parameter to true in the API request, or use the --no-reboot option in the CLI to prevent Amazon EC2 from shutting down and rebooting the instance.

    If you choose to bypass the shutdown and reboot process by setting the NoReboot parameter to true in the API request, or by using the --no-reboot option in the CLI, we can't guarantee the file system integrity of the created image.

    If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Create an Amazon EBS-backed Linux AMI in the Amazon Elastic Compute Cloud User Guide.

    " }, + "CreateInstanceConnectEndpoint":{ + "name":"CreateInstanceConnectEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceConnectEndpointRequest"}, + "output":{"shape":"CreateInstanceConnectEndpointResult"}, + "documentation":"

    Creates an EC2 Instance Connect Endpoint.

    An EC2 Instance Connect Endpoint allows you to connect to a resource, without requiring the resource to have a public IPv4 address. For more information, see Connect to your resources without requiring a public IPv4 address using EC2 Instance Connect Endpoint in the Amazon EC2 User Guide.

    " + }, "CreateInstanceEventWindow":{ "name":"CreateInstanceEventWindow", "http":{ @@ -1463,6 +1473,16 @@ "output":{"shape":"DeleteFpgaImageResult"}, "documentation":"

    Deletes the specified Amazon FPGA Image (AFI).

    " }, + "DeleteInstanceConnectEndpoint":{ + "name":"DeleteInstanceConnectEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceConnectEndpointRequest"}, + "output":{"shape":"DeleteInstanceConnectEndpointResult"}, + "documentation":"

    Deletes the specified EC2 Instance Connect Endpoint.

    " + }, "DeleteInstanceEventWindow":{ "name":"DeleteInstanceEventWindow", "http":{ @@ -2142,7 +2162,7 @@ }, "input":{"shape":"DescribeAccountAttributesRequest"}, "output":{"shape":"DescribeAccountAttributesResult"}, - "documentation":"

    Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

    • supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.

    We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon EC2 User Guide.

    " + "documentation":"

    Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

    • default-vpc: The ID of the default VPC for your account, or none.

    • max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.

    • max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate.

    • supported-platforms: This attribute is deprecated.

    • vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate.

    • vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.

    " }, "DescribeAddressTransfers":{ "name":"DescribeAddressTransfers", @@ -2152,7 +2172,7 @@ }, "input":{"shape":"DescribeAddressTransfersRequest"}, "output":{"shape":"DescribeAddressTransfersResult"}, - "documentation":"

    Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

    " + "documentation":"

    Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

    When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted.

    " }, "DescribeAddresses":{ "name":"DescribeAddresses", @@ -2162,7 +2182,7 @@ }, "input":{"shape":"DescribeAddressesRequest"}, "output":{"shape":"DescribeAddressesResult"}, - "documentation":"

    Describes the specified Elastic IP addresses or all of your Elastic IP addresses.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    Describes the specified Elastic IP addresses or all of your Elastic IP addresses.

    " }, "DescribeAddressesAttribute":{ "name":"DescribeAddressesAttribute", @@ -2584,6 +2604,16 @@ "output":{"shape":"InstanceAttribute"}, "documentation":"

    Describes the specified attribute of the specified instance. You can specify only one attribute at a time. Valid attribute values are: instanceType | kernel | ramdisk | userData | disableApiTermination | instanceInitiatedShutdownBehavior | rootDeviceName | blockDeviceMapping | productCodes | sourceDestCheck | groupSet | ebsOptimized | sriovNetSupport

    " }, + "DescribeInstanceConnectEndpoints":{ + "name":"DescribeInstanceConnectEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceConnectEndpointsRequest"}, + "output":{"shape":"DescribeInstanceConnectEndpointsResult"}, + "documentation":"

    Describes the specified EC2 Instance Connect Endpoints or all EC2 Instance Connect Endpoints.

    " + }, "DescribeInstanceCreditSpecifications":{ "name":"DescribeInstanceCreditSpecifications", "http":{ @@ -2832,7 +2862,7 @@ }, "input":{"shape":"DescribeMovingAddressesRequest"}, "output":{"shape":"DescribeMovingAddressesResult"}, - "documentation":"

    Describes your Elastic IP addresses that are being moved to the EC2-VPC platform, or that are being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    " + "documentation":"

    This action is deprecated.

    Describes your Elastic IP addresses that are being moved from or being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

    " }, "DescribeNatGateways":{ "name":"DescribeNatGateways", @@ -3757,7 +3787,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateAddressRequest"}, - "documentation":"

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    An Elastic IP address is for use in either the EC2-Classic platform or in a VPC. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    " + "documentation":"

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    " }, "DisassociateClientVpnTargetNetwork":{ "name":"DisassociateClientVpnTargetNetwork", @@ -4254,7 +4284,7 @@ }, "input":{"shape":"GetIpamPoolAllocationsRequest"}, "output":{"shape":"GetIpamPoolAllocationsResult"}, - "documentation":"

    Get a list of all the CIDR allocations in an IPAM pool.

    If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

    " + "documentation":"

    Get a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

    If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

    " }, "GetIpamPoolCidrs":{ "name":"GetIpamPoolCidrs", @@ -5225,7 +5255,7 @@ }, "input":{"shape":"MoveAddressToVpcRequest"}, "output":{"shape":"MoveAddressToVpcResult"}, - "documentation":"

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that was originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    This action is deprecated.

    Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC platform. The Elastic IP address must be allocated to your account for more than 24 hours, and it must not be associated with an instance. After the Elastic IP address is moved, it is no longer available for use in the EC2-Classic platform, unless you move it back using the RestoreAddressToClassic request. You cannot move an Elastic IP address that was originally allocated for use in the EC2-VPC platform to the EC2-Classic platform.

    " }, "MoveByoipCidrToIpam":{ "name":"MoveByoipCidrToIpam", @@ -5403,7 +5433,7 @@ "requestUri":"/" }, "input":{"shape":"ReleaseAddressRequest"}, - "documentation":"

    Releases the specified Elastic IP address.

    [EC2-Classic, default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

    After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another Amazon Web Services account.

    [EC2-VPC] After you release an Elastic IP address for use in a VPC, you might be able to recover it. For more information, see AllocateAddress.

    For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    Releases the specified Elastic IP address.

    [Default VPC] Releasing an Elastic IP address automatically disassociates it from any instance that it's associated with. To disassociate an Elastic IP address without releasing it, use DisassociateAddress.

    [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before you can release it. Otherwise, Amazon EC2 returns an error (InvalidIPAddress.InUse).

    After releasing an Elastic IP address, it is released to the IP address pool. Be sure to update your DNS records and any servers or devices that communicate with the address. If you attempt to release an Elastic IP address that you already released, you'll get an AuthFailure error if the address is already allocated to another Amazon Web Services account.

    After you release an Elastic IP address, you might be able to recover it. For more information, see AllocateAddress.

    " }, "ReleaseHosts":{ "name":"ReleaseHosts", @@ -5423,7 +5453,7 @@ }, "input":{"shape":"ReleaseIpamPoolAllocationRequest"}, "output":{"shape":"ReleaseIpamPoolAllocationResult"}, - "documentation":"

    Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.

    All EC2 API actions follow an eventual consistency model.

    " + "documentation":"

    Release an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.

    All EC2 API actions follow an eventual consistency model.

    " }, "ReplaceIamInstanceProfileAssociation":{ "name":"ReplaceIamInstanceProfileAssociation", @@ -5596,7 +5626,7 @@ }, "input":{"shape":"RestoreAddressToClassicRequest"}, "output":{"shape":"RestoreAddressToClassicResult"}, - "documentation":"

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    This action is deprecated.

    Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

    " }, "RestoreImageFromRecycleBin":{ "name":"RestoreImageFromRecycleBin", @@ -6500,17 +6530,17 @@ }, "AllocationId":{ "shape":"String", - "documentation":"

    The ID representing the allocation of the address for use with EC2-VPC.

    ", + "documentation":"

    The ID representing the allocation of the address.

    ", "locationName":"allocationId" }, "AssociationId":{ "shape":"String", - "documentation":"

    The ID representing the association of the address with an instance in a VPC.

    ", + "documentation":"

    The ID representing the association of the address with an instance.

    ", "locationName":"associationId" }, "Domain":{ "shape":"DomainType", - "documentation":"

    Indicates whether this Elastic IP address is for use with instances in EC2-Classic (standard) or instances in a VPC (vpc).

    ", + "documentation":"

    The network (vpc).

    ", "locationName":"domain" }, "NetworkInterfaceId":{ @@ -6704,11 +6734,11 @@ "members":{ "Domain":{ "shape":"DomainType", - "documentation":"

    Indicates whether the Elastic IP address is for use with instances in a VPC or instances in EC2-Classic.

    Default: If the Region supports EC2-Classic, the default is standard. Otherwise, the default is vpc.

    " + "documentation":"

    The network (vpc).

    " }, "Address":{ "shape":"PublicIpAddress", - "documentation":"

    [EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address pool.

    " + "documentation":"

    The Elastic IP address to recover or an IPv4 address from an address pool.

    " }, "PublicIpv4Pool":{ "shape":"Ipv4PoolEc2Id", @@ -6744,7 +6774,7 @@ }, "AllocationId":{ "shape":"String", - "documentation":"

    [EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.

    ", + "documentation":"

    The ID that represents the allocation of the Elastic IP address.

    ", "locationName":"allocationId" }, "PublicIpv4Pool":{ @@ -6759,7 +6789,7 @@ }, "Domain":{ "shape":"DomainType", - "documentation":"

    Indicates whether the Elastic IP address is for use with instances in a VPC (vpc) or instances in EC2-Classic (standard).

    ", + "documentation":"

    The network (vpc).

    ", "locationName":"domain" }, "CustomerOwnedIp":{ @@ -6774,17 +6804,14 @@ }, "CarrierIp":{ "shape":"String", - "documentation":"

    The carrier IP address. This option is only available for network interfaces which reside in a subnet in a Wavelength Zone (for example an EC2 instance).

    ", + "documentation":"

    The carrier IP address. This option is only available for network interfaces that reside in a subnet in a Wavelength Zone.

    ", "locationName":"carrierIp" } } }, "AllocateHostsRequest":{ "type":"structure", - "required":[ - "AvailabilityZone", - "Quantity" - ], + "required":["AvailabilityZone"], "members":{ "AutoPlacement":{ "shape":"AutoPlacement", @@ -6812,7 +6839,7 @@ }, "Quantity":{ "shape":"Integer", - "documentation":"

    The number of Dedicated Hosts to allocate to your account with these parameters.

    ", + "documentation":"

    The number of Dedicated Hosts to allocate to your account with these parameters. If you are allocating the Dedicated Hosts on an Outpost, and you specify AssetIds, you can omit this parameter. In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware asset. If you specify both AssetIds and Quantity, then the value that you specify for Quantity must be equal to the number of asset IDs specified.

    ", "locationName":"quantity" }, "TagSpecifications":{ @@ -6826,11 +6853,16 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on which to allocate the Dedicated Host.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on which to allocate the Dedicated Host. If you specify OutpostArn, you can optionally specify AssetIds.

    If you are allocating the Dedicated Host in a Region, omit this parameter.

    " }, "HostMaintenance":{ "shape":"HostMaintenance", "documentation":"

    Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide.

    " + }, + "AssetIds":{ + "shape":"AssetIdList", + "documentation":"

    The IDs of the Outpost hardware assets on which to allocate the Dedicated Hosts. Targeting specific hardware assets on an Outpost can help to minimize latency between your workloads. This parameter is supported only if you specify OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this parameter.

    • If you specify this parameter, you can omit Quantity. In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware asset.

    • If you specify both AssetIds and Quantity, then the value for Quantity must be equal to the number of asset IDs specified.

    ", + "locationName":"AssetId" } } }, @@ -7369,6 +7401,11 @@ "locationName":"item" } }, + "AssetId":{"type":"string"}, + "AssetIdList":{ + "type":"list", + "member":{"shape":"AssetId"} + }, "AssignIpv6AddressesRequest":{ "type":"structure", "required":["NetworkInterfaceId"], @@ -7536,19 +7573,19 @@ "members":{ "AllocationId":{ "shape":"AllocationId", - "documentation":"

    [EC2-VPC] The allocation ID. This is required for EC2-VPC.

    " + "documentation":"

    The allocation ID. This is required.

    " }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

    The ID of the instance. The instance must have exactly one attached network interface. For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both. For EC2-Classic, you must specify an instance ID and the instance must be in the running state.

    " + "documentation":"

    The ID of the instance. The instance must have exactly one attached network interface. You can specify either the instance ID or the network interface ID, but not both.

    " }, "PublicIp":{ "shape":"EipAllocationPublicIp", - "documentation":"

    [EC2-Classic] The Elastic IP address to associate with the instance. This is required for EC2-Classic.

    " + "documentation":"

    Deprecated.

    " }, "AllowReassociation":{ "shape":"Boolean", - "documentation":"

    [EC2-VPC] For a VPC in an EC2-Classic account, specify true to allow an Elastic IP address that is already associated with an instance or network interface to be reassociated with the specified instance or network interface. Otherwise, the operation fails. In a VPC in an EC2-VPC-only account, reassociation is automatic, therefore you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

    ", + "documentation":"

    Reassociation is automatic, but you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

    ", "locationName":"allowReassociation" }, "DryRun":{ @@ -7558,12 +7595,12 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

    [EC2-VPC] The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    For EC2-VPC, you can specify either the instance ID or the network interface ID, but not both.

    ", + "documentation":"

    The ID of the network interface. If the instance has more than one network interface, you must specify a network interface ID.

    You can specify either the instance ID or the network interface ID, but not both.

    ", "locationName":"networkInterfaceId" }, "PrivateIpAddress":{ "shape":"String", - "documentation":"

    [EC2-VPC] The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

    ", + "documentation":"

    The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

    ", "locationName":"privateIpAddress" } } @@ -7573,7 +7610,7 @@ "members":{ "AssociationId":{ "shape":"String", - "documentation":"

    [EC2-VPC] The ID that represents the association of the Elastic IP address with an instance.

    ", + "documentation":"

    The ID that represents the association of the Elastic IP address with an instance.

    ", "locationName":"associationId" } } @@ -8830,6 +8867,7 @@ }, "documentation":"

    Describes Availability Zones, Local Zones, and Wavelength Zones.

    " }, + "AvailabilityZoneId":{"type":"string"}, "AvailabilityZoneList":{ "type":"list", "member":{ @@ -11412,7 +11450,7 @@ }, "AmdSevSnp":{ "shape":"AmdSevSnpSpecification", - "documentation":"

    Indicates whether the instance is enabled for AMD SEV-SNP.

    ", + "documentation":"

    Indicates whether the instance is enabled for AMD SEV-SNP. For more information, see AMD SEV-SNP.

    ", "locationName":"amdSevSnp" } }, @@ -11431,7 +11469,7 @@ }, "AmdSevSnp":{ "shape":"AmdSevSnpSpecification", - "documentation":"

    Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only.

    " + "documentation":"

    Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. For more information, see AMD SEV-SNP.

    " } }, "documentation":"

    The CPU options for the instance. Both the core count and threads per core must be specified in the request.

    " @@ -11565,11 +11603,11 @@ "documentation":"

    The type of operating system for which to reserve capacity.

    " }, "AvailabilityZone":{ - "shape":"String", + "shape":"AvailabilityZoneName", "documentation":"

    The Availability Zone in which to create the Capacity Reservation.

    " }, "AvailabilityZoneId":{ - "shape":"String", + "shape":"AvailabilityZoneId", "documentation":"

    The ID of the Availability Zone in which to create the Capacity Reservation.

    " }, "Tenancy":{ @@ -12390,6 +12428,54 @@ } } }, + "CreateInstanceConnectEndpointRequest":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

    The ID of the subnet in which to create the EC2 Instance Connect Endpoint.

    " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdStringListRequest", + "documentation":"

    One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint.

    ", + "locationName":"SecurityGroupId" + }, + "PreserveClientIp":{ + "shape":"Boolean", + "documentation":"

    Indicates whether your client's IP address is preserved as the source. The value is true or false.

    • If true, your client's IP address is used when you connect to a resource.

    • If false, the elastic network interface IP address is used when you connect to a resource.

    Default: true

    " + }, + "ClientToken":{ + "shape":"String", + "documentation":"

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

    The tags to apply to the EC2 Instance Connect Endpoint during creation.

    ", + "locationName":"TagSpecification" + } + } + }, + "CreateInstanceConnectEndpointResult":{ + "type":"structure", + "members":{ + "InstanceConnectEndpoint":{ + "shape":"Ec2InstanceConnectEndpoint", + "documentation":"

    Information about the EC2 Instance Connect Endpoint.

    ", + "locationName":"instanceConnectEndpoint" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

    Unique, case-sensitive idempotency token provided by the client in the request.

    ", + "locationName":"clientToken" + } + } + }, "CreateInstanceEventWindowRequest":{ "type":"structure", "members":{ @@ -13397,7 +13483,7 @@ }, "InterfaceType":{ "shape":"NetworkInterfaceCreationType", - "documentation":"

    The type of network interface. The default is interface.

    The only supported values are efa and trunk.

    " + "documentation":"

    The type of network interface. The default is interface.

    The only supported values are interface, efa, and trunk.

    " }, "SubnetId":{ "shape":"SubnetId", @@ -16150,6 +16236,30 @@ } } }, + "DeleteInstanceConnectEndpointRequest":{ + "type":"structure", + "required":["InstanceConnectEndpointId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + }, + "InstanceConnectEndpointId":{ + "shape":"InstanceConnectEndpointId", + "documentation":"

    The ID of the EC2 Instance Connect Endpoint to delete.

    " + } + } + }, + "DeleteInstanceConnectEndpointResult":{ + "type":"structure", + "members":{ + "InstanceConnectEndpoint":{ + "shape":"Ec2InstanceConnectEndpoint", + "documentation":"

    Information about the EC2 Instance Connect Endpoint.

    ", + "locationName":"instanceConnectEndpoint" + } + } + }, "DeleteInstanceEventWindowRequest":{ "type":"structure", "required":["InstanceEventWindowId"], @@ -18028,7 +18138,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters. Filter names and values are case-sensitive.

    • allocation-id - [EC2-VPC] The allocation ID for the address.

    • association-id - [EC2-VPC] The association ID for the address.

    • domain - Indicates whether the address is for use in EC2-Classic (standard) or in a VPC (vpc).

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-border-group - A unique set of Availability Zones, Local Zones, or Wavelength Zones from where Amazon Web Services advertises IP addresses.

    • network-interface-id - [EC2-VPC] The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The Amazon Web Services account ID of the owner.

    • private-ip-address - [EC2-VPC] The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address, or the carrier IP address.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    ", + "documentation":"

    One or more filters. Filter names and values are case-sensitive.

    • allocation-id - The allocation ID for the address.

    • association-id - The association ID for the address.

    • instance-id - The ID of the instance the address is associated with, if any.

    • network-border-group - A unique set of Availability Zones, Local Zones, or Wavelength Zones from where Amazon Web Services advertises IP addresses.

    • network-interface-id - The ID of the network interface that the address is associated with, if any.

    • network-interface-owner-id - The Amazon Web Services account ID of the owner.

    • private-ip-address - The private IP address associated with the Elastic IP address.

    • public-ip - The Elastic IP address, or the carrier IP address.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    ", "locationName":"Filter" }, "PublicIps":{ @@ -18038,7 +18148,7 @@ }, "AllocationIds":{ "shape":"AllocationIdList", - "documentation":"

    [EC2-VPC] Information about the allocation IDs.

    ", + "documentation":"

    Information about the allocation IDs.

    ", "locationName":"AllocationId" }, "DryRun":{ @@ -19937,6 +20047,48 @@ } } }, + "DescribeInstanceConnectEndpointsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + }, + "MaxResults":{ + "shape":"InstanceConnectEndpointMaxResults", + "documentation":"

    The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

    " + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

    One or more filters.

    • instance-connect-endpoint-id - The ID of the EC2 Instance Connect Endpoint.

    • state - The state of the EC2 Instance Connect Endpoint (create-in-progress | create-complete | create-failed | delete-in-progress | delete-complete | delete-failed).

    • subnet-id - The ID of the subnet in which the EC2 Instance Connect Endpoint was created.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • tag-value - The value of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific value, regardless of tag key.

    • vpc-id - The ID of the VPC in which the EC2 Instance Connect Endpoint was created.

    ", + "locationName":"Filter" + }, + "InstanceConnectEndpointIds":{ + "shape":"ValueStringList", + "documentation":"

    One or more EC2 Instance Connect Endpoint IDs.

    ", + "locationName":"InstanceConnectEndpointId" + } + } + }, + "DescribeInstanceConnectEndpointsResult":{ + "type":"structure", + "members":{ + "InstanceConnectEndpoints":{ + "shape":"InstanceConnectEndpointSet", + "documentation":"

    Information about the EC2 Instance Connect Endpoints.

    ", + "locationName":"instanceConnectEndpointSet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The token to include in another request to get the next page of items. This value is null when there are no more items to return.

    ", + "locationName":"nextToken" + } + } + }, "DescribeInstanceCreditSpecificationsMaxResults":{ "type":"integer", "max":1000, @@ -20149,7 +20301,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters. Filter names and values are case-sensitive.

    • auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

    • bare-metal - Indicates whether it is a bare metal instance type (true | false).

    • burstable-performance-supported - Indicates whether it is a burstable performance instance type (true | false).

    • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

    • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

    • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

    • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

    • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

    • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

    • hypervisor - The hypervisor (nitro | xen).

    • instance-storage-info.disk.count - The number of local disks.

    • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

    • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

    • instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported).

    • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

    • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

    • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

    • instance-type - The instance type (for example c5.2xlarge or c5*).

    • memory-info.size-in-mib - The memory size.

    • network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance.

    • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

    • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

    • network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false).

    • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

    • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

    • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

    • network-info.maximum-network-cards - The maximum number of network cards per instance.

    • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

    • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

    • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

    • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

    • supported-boot-mode - The boot mode (legacy-bios | uefi).

    • supported-root-device-type - The root device type (ebs | instance-store).

    • supported-usage-class - The usage class (on-demand | spot).

    • supported-virtualization-type - The virtualization type (hvm | paravirtual).

    • vcpu-info.default-cores - The default number of cores for the instance type.

    • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

    • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

    • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

    • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".

    ", + "documentation":"

    One or more filters. Filter names and values are case-sensitive.

    • auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

    • bare-metal - Indicates whether it is a bare metal instance type (true | false).

    • burstable-performance-supported - Indicates whether it is a burstable performance instance type (true | false).

    • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

    • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

    • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

    • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

    • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

    • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

    • hypervisor - The hypervisor (nitro | xen).

    • instance-storage-info.disk.count - The number of local disks.

    • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

    • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

    • instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported).

    • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

    • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

    • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

    • instance-type - The instance type (for example c5.2xlarge or c5*).

    • memory-info.size-in-mib - The memory size.

    • network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance.

    • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

    • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

    • network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false).

    • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

    • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

    • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

    • network-info.maximum-network-cards - The maximum number of network cards per instance.

    • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

    • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

    • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

    • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

    • processor-info.supported-features - The supported CPU features (amd-sev-snp).

    • supported-boot-mode - The boot mode (legacy-bios | uefi).

    • supported-root-device-type - The root device type (ebs | instance-store).

    • supported-usage-class - The usage class (on-demand | spot).

    • supported-virtualization-type - The virtualization type (hvm | paravirtual).

    • vcpu-info.default-cores - The default number of cores for the instance type.

    • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

    • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

    • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

    • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".

    ", "locationName":"Filter" }, "MaxResults":{ @@ -20167,7 +20319,7 @@ "members":{ "InstanceTypes":{ "shape":"InstanceTypeInfoList", - "documentation":"

    The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

    ", + "documentation":"

    The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

    When you change your EBS-backed instance type, instance restart or replacement behavior depends on the instance type compatibility between the old and new types. An instance that's backed by an instance store volume is always replaced. For more information, see Change the instance type in the Amazon EC2 User Guide.

    ", "locationName":"instanceTypeSet" }, "NextToken":{ @@ -25147,11 +25299,11 @@ "members":{ "AssociationId":{ "shape":"ElasticIpAssociationId", - "documentation":"

    [EC2-VPC] The association ID. Required for EC2-VPC.

    " + "documentation":"

    The association ID. This parameter is required.

    " }, "PublicIp":{ "shape":"EipAllocationPublicIp", - "documentation":"

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    " + "documentation":"

    Deprecated.

    " }, "DryRun":{ "shape":"Boolean", @@ -25977,6 +26129,98 @@ "default" ] }, + "Ec2InstanceConnectEndpoint":{ + "type":"structure", + "members":{ + "OwnerId":{ + "shape":"String", + "documentation":"

    The ID of the Amazon Web Services account that created the EC2 Instance Connect Endpoint.

    ", + "locationName":"ownerId" + }, + "InstanceConnectEndpointId":{ + "shape":"InstanceConnectEndpointId", + "documentation":"

    The ID of the EC2 Instance Connect Endpoint.

    ", + "locationName":"instanceConnectEndpointId" + }, + "InstanceConnectEndpointArn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the EC2 Instance Connect Endpoint.

    ", + "locationName":"instanceConnectEndpointArn" + }, + "State":{ + "shape":"Ec2InstanceConnectEndpointState", + "documentation":"

    The current state of the EC2 Instance Connect Endpoint.

    ", + "locationName":"state" + }, + "StateMessage":{ + "shape":"String", + "documentation":"

    The message for the current state of the EC2 Instance Connect Endpoint. Can include a failure message.

    ", + "locationName":"stateMessage" + }, + "DnsName":{ + "shape":"String", + "documentation":"

    The DNS name of the EC2 Instance Connect Endpoint.

    ", + "locationName":"dnsName" + }, + "FipsDnsName":{ + "shape":"String", + "documentation":"

    ", + "locationName":"fipsDnsName" + }, + "NetworkInterfaceIds":{ + "shape":"NetworkInterfaceIdSet", + "documentation":"

    The ID of the elastic network interface that Amazon EC2 automatically created when creating the EC2 Instance Connect Endpoint.

    ", + "locationName":"networkInterfaceIdSet" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

    The ID of the VPC in which the EC2 Instance Connect Endpoint was created.

    ", + "locationName":"vpcId" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

    The Availability Zone of the EC2 Instance Connect Endpoint.

    ", + "locationName":"availabilityZone" + }, + "CreatedAt":{ + "shape":"MillisecondDateTime", + "documentation":"

    The date and time that the EC2 Instance Connect Endpoint was created.

    ", + "locationName":"createdAt" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

    The ID of the subnet in which the EC2 Instance Connect Endpoint was created.

    ", + "locationName":"subnetId" + }, + "PreserveClientIp":{ + "shape":"Boolean", + "documentation":"

    Indicates whether your client's IP address is preserved as the source. The value is true or false.

    • If true, your client's IP address is used when you connect to a resource.

    • If false, the elastic network interface IP address is used when you connect to a resource.

    Default: true

    ", + "locationName":"preserveClientIp" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdSet", + "documentation":"

    The security groups associated with the endpoint. If you didn't specify a security group, the default security group for your VPC is associated with the endpoint.

    ", + "locationName":"securityGroupIdSet" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags assigned to the EC2 Instance Connect Endpoint.

    ", + "locationName":"tagSet" + } + }, + "documentation":"

    The EC2 Instance Connect Endpoint.

    " + }, + "Ec2InstanceConnectEndpointState":{ + "type":"string", + "enum":[ + "create-in-progress", + "create-complete", + "create-failed", + "delete-in-progress", + "delete-complete", + "delete-failed" + ] + }, "EfaInfo":{ "type":"structure", "members":{ @@ -28243,7 +28487,7 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

    The ID of the AMI. An AMI is required to launch an instance. The AMI ID must be specified here or in the launch template.

    ", + "documentation":"

    The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

    ", "locationName":"imageId" } }, @@ -28300,7 +28544,7 @@ }, "ImageId":{ "shape":"ImageId", - "documentation":"

    The ID of the AMI. An AMI is required to launch an instance. The AMI ID must be specified here or in the launch template.

    " + "documentation":"

    The ID of the AMI. An AMI is required to launch an instance. This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

    " } }, "documentation":"

    Describes overrides for a launch template.

    " @@ -30843,6 +31087,11 @@ "shape":"HostMaintenance", "documentation":"

    Indicates whether host maintenance is enabled or disabled for the Dedicated Host.

    ", "locationName":"hostMaintenance" + }, + "AssetId":{ + "shape":"AssetId", + "documentation":"

    The ID of the Outpost hardware asset on which the Dedicated Host is allocated.

    ", + "locationName":"assetId" } }, "documentation":"

    Describes the properties of the Dedicated Host.

    " @@ -32918,6 +33167,19 @@ }, "documentation":"

    Information about the number of instances that can be launched onto the Dedicated Host.

    " }, + "InstanceConnectEndpointId":{"type":"string"}, + "InstanceConnectEndpointMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "InstanceConnectEndpointSet":{ + "type":"list", + "member":{ + "shape":"Ec2InstanceConnectEndpoint", + "locationName":"item" + } + }, "InstanceCount":{ "type":"structure", "members":{ @@ -37157,7 +37419,7 @@ "members":{ "LaunchTemplateSpecification":{ "shape":"FleetLaunchTemplateSpecification", - "documentation":"

    The launch template.

    ", + "documentation":"

    The launch template to use. Make sure that the launch template does not contain the NetworkInterfaceId parameter because you can't specify a network interface ID in a Spot Fleet.

    ", "locationName":"launchTemplateSpecification" }, "Overrides":{ @@ -39991,7 +40253,7 @@ }, "Tenancy":{ "shape":"HostTenancy", - "documentation":"

    The tenancy for the instance.

    For T3 instances, you can't change the tenancy from dedicated to host, or from host to dedicated. Attempting to make one of these unsupported tenancy changes results in the InvalidTenancy error code.

    ", + "documentation":"

    The tenancy for the instance.

    For T3 instances, you must launch the instance on a Dedicated Host to use a tenancy of host. You can't change the tenancy from host to dedicated or default. Attempting to make one of these unsupported tenancy changes results in an InvalidRequest error code.

    ", "locationName":"tenancy" }, "PartitionNumber":{ @@ -40000,7 +40262,7 @@ }, "HostResourceGroupArn":{ "shape":"String", - "documentation":"

    The ARN of the host resource group in which to place the instance.

    " + "documentation":"

    The ARN of the host resource group in which to place the instance. The instance must have a tenancy of host to specify this parameter.

    " }, "GroupId":{ "shape":"PlacementGroupId", @@ -41892,7 +42154,7 @@ "documentation":"

    The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.

    Constraints: A size /126 CIDR block from the local fd00::/8 range.

    " }, "PreSharedKey":{ - "shape":"String", + "shape":"preSharedKey", "documentation":"

    The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

    Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).

    " }, "Phase1LifetimeSeconds":{ @@ -41971,7 +42233,8 @@ "documentation":"

    Turn on or off tunnel endpoint lifecycle control feature.

    " } }, - "documentation":"

    The Amazon Web Services Site-to-Site VPN tunnel options to modify.

    " + "documentation":"

    The Amazon Web Services Site-to-Site VPN tunnel options to modify.

    ", + "sensitive":true }, "MonitorInstancesRequest":{ "type":"structure", @@ -42098,7 +42361,7 @@ "members":{ "MoveStatus":{ "shape":"MoveStatus", - "documentation":"

    The status of the Elastic IP address that's being moved to the EC2-VPC platform, or restored to the EC2-Classic platform.

    ", + "documentation":"

    The status of the Elastic IP address that's being moved or restored.

    ", "locationName":"moveStatus" }, "PublicIp":{ @@ -42107,7 +42370,7 @@ "locationName":"publicIp" } }, - "documentation":"

    Describes the status of a moving Elastic IP address.

    We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    This action is deprecated.

    Describes the status of a moving Elastic IP address.

    " }, "MovingAddressStatusSet":{ "type":"list", @@ -43188,6 +43451,13 @@ "locationName":"item" } }, + "NetworkInterfaceIdSet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, "NetworkInterfaceIpv6Address":{ "type":"structure", "members":{ @@ -44822,7 +45092,7 @@ }, "SupportedFeatures":{ "shape":"SupportedAdditionalProcessorFeatureList", - "documentation":"

    Indicates whether the instance type supports AMD SEV-SNP. If the request returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported.

    ", + "documentation":"

    Indicates whether the instance type supports AMD SEV-SNP. If the request returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. For more information, see AMD SEV-SNP.

    ", "locationName":"supportedFeatures" } }, @@ -45899,11 +46169,11 @@ "members":{ "AllocationId":{ "shape":"AllocationId", - "documentation":"

    [EC2-VPC] The allocation ID. Required for EC2-VPC.

    " + "documentation":"

    The allocation ID. This parameter is required.

    " }, "PublicIp":{ "shape":"String", - "documentation":"

    [EC2-Classic] The Elastic IP address. Required for EC2-Classic.

    " + "documentation":"

    Deprecated.

    " }, "NetworkBorderGroup":{ "shape":"String", @@ -47771,7 +48041,8 @@ "vpn-connection-device-type", "vpc-block-public-access-exclusion", "ipam-resource-discovery", - "ipam-resource-discovery-association" + "ipam-resource-discovery-association", + "instance-connect-endpoint" ] }, "ResponseError":{ @@ -48714,7 +48985,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

    The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

    Default: m1.small

    " + "documentation":"

    The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

    When you change your EBS-backed instance type, instance restart or replacement behavior depends on the instance type compatibility between the old and new types. An instance that's backed by an instance store volume is always replaced. For more information, see Change the instance type in the Amazon EC2 User Guide.

    Default: m1.small

    " }, "Ipv6AddressCount":{ "shape":"Integer", @@ -49656,6 +49927,13 @@ "locationName":"item" } }, + "SecurityGroupIdSet":{ + "type":"list", + "member":{ + "shape":"SecurityGroupId", + "locationName":"item" + } + }, "SecurityGroupIdStringList":{ "type":"list", "member":{ @@ -49663,6 +49941,15 @@ "locationName":"SecurityGroupId" } }, + "SecurityGroupIdStringListRequest":{ + "type":"list", + "member":{ + "shape":"SecurityGroupId", + "locationName":"SecurityGroupId" + }, + "max":16, + "min":0 + }, "SecurityGroupIdentifier":{ "type":"structure", "members":{ @@ -54817,7 +55104,7 @@ "locationName":"tunnelInsideIpv6Cidr" }, "PreSharedKey":{ - "shape":"String", + "shape":"preSharedKey", "documentation":"

    The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and the customer gateway.

    ", "locationName":"preSharedKey" }, @@ -55913,9 +56200,17 @@ "KinesisDataFirehose":{ "shape":"VerifiedAccessLogKinesisDataFirehoseDestinationOptions", "documentation":"

    Sends Verified Access logs to Kinesis.

    " + }, + "LogVersion":{ + "shape":"String", + "documentation":"

    The logging version to use.

    Valid values: ocsf-0.1 | ocsf-1.0.0-rc.2

    " + }, + "IncludeTrustContext":{ + "shape":"Boolean", + "documentation":"

    Include trust data sent by trust providers into the logs.

    " } }, - "documentation":"

    Describes the destinations for Verified Access logs.

    " + "documentation":"

    Options for Verified Access logs.

    " }, "VerifiedAccessLogS3Destination":{ "type":"structure", @@ -55988,9 +56283,19 @@ "shape":"VerifiedAccessLogKinesisDataFirehoseDestination", "documentation":"

    Kinesis logging destination.

    ", "locationName":"kinesisDataFirehose" + }, + "LogVersion":{ + "shape":"String", + "documentation":"

    Describes current setting for the logging version.

    ", + "locationName":"logVersion" + }, + "IncludeTrustContext":{ + "shape":"Boolean", + "documentation":"

    Describes current setting for including trust data into the logs.

    ", + "locationName":"includeTrustContext" } }, - "documentation":"

    Describes the destinations for Verified Access logs.

    " + "documentation":"

    Describes the options for Verified Access logs.

    " }, "VerifiedAccessTrustProvider":{ "type":"structure", @@ -57296,7 +57601,7 @@ "type":"structure", "members":{ "CustomerGatewayConfiguration":{ - "shape":"String", + "shape":"customerGatewayConfiguration", "documentation":"

    The configuration information for the VPN connection's customer gateway (in the native XML format). This element is always present in the CreateVpnConnection response; however, it's present in the DescribeVpnConnections response only if the VPN connection is in the pending or available state.

    ", "locationName":"customerGatewayConfiguration" }, @@ -57670,7 +57975,7 @@ "documentation":"

    The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.

    Constraints: A size /126 CIDR block from the local fd00::/8 range.

    " }, "PreSharedKey":{ - "shape":"String", + "shape":"preSharedKey", "documentation":"

    The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.

    Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0).

    " }, "Phase1LifetimeSeconds":{ @@ -57805,6 +58110,14 @@ "locationName":"ZoneName" } }, + "customerGatewayConfiguration":{ + "type":"string", + "sensitive":true + }, + "preSharedKey":{ + "type":"string", + "sensitive":true + }, "scope":{ "type":"string", "enum":[ diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index 1e66881680b5..29132b6909c3 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 77769c73ac70..08552dc80669 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 519051e1a564..4d2671b34ce1 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 94eee1e161b1..5b708e7a105a 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index ec1431a2de53..608d286bcb10 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -186,7 +186,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServerException"} ], - "documentation":"

    Deletes one or more task definitions.

    You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

    When you delete a task definition revision, it is immediately transitions from the INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that reference a DELETE_IN_PROGRESS task definition revision continue to run without disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision can still scale up or down by modifying the service's desired count.

    You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision.

    A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.

    " + "documentation":"

    Deletes one or more task definitions.

    You must deregister a task definition revision before you delete it. For more information, see DeregisterTaskDefinition.

    When you delete a task definition revision, it immediately transitions from INACTIVE to DELETE_IN_PROGRESS. Existing tasks and services that reference a DELETE_IN_PROGRESS task definition revision continue to run without disruption. Existing services that reference a DELETE_IN_PROGRESS task definition revision can still scale up or down by modifying the service's desired count.

    You can't use a DELETE_IN_PROGRESS task definition revision to run new tasks or create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision.

    A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated.

    When you delete all INACTIVE task definition revisions, the task definition name is not displayed in the console and not returned in the API. If a task definition revision is in the DELETE_IN_PROGRESS state, the task definition name is displayed in the console and returned in the API. The task definition name is retained by Amazon ECS and the revision is incremented the next time you create a task definition with that name.

    " }, "DeleteTaskSet":{ "name":"DeleteTaskSet", @@ -2068,11 +2068,11 @@ }, "enableECSManagedTags":{ "shape":"Boolean", - "documentation":"

    Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

    " + "documentation":"

    Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in the Amazon Elastic Container Service Developer Guide.

    When you use Amazon ECS managed tags, you need to set the propagateTags request parameter.

    " }, "propagateTags":{ "shape":"PropagateTags", - "documentation":"

    Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

    " + "documentation":"

    Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.

    The default is NONE.

    " }, "enableExecuteCommand":{ "shape":"Boolean", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index 9a489087553e..21ccfe61edc0 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/service-2.json b/services/efs/src/main/resources/codegen-resources/service-2.json index d7e1b151d717..1fb57e8f110c 100644 --- a/services/efs/src/main/resources/codegen-resources/service-2.json +++ b/services/efs/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"AccessPointLimitExceeded"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

    If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

    This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

    " + "documentation":"

    Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

    If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

    This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

    Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

    " }, "CreateFileSystem":{ "name":"CreateFileSystem", @@ -49,7 +49,7 @@ {"shape":"ThroughputLimitExceeded"}, {"shape":"UnsupportedAvailabilityZone"} ], - "documentation":"

    Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following:

    • Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state creating.

    • Returns with the description of the created file system.

    Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

    For basic use cases, you can use a randomly generated UUID for the creation token.

    The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

    For more information, see Creating a file system in the Amazon EFS User Guide.

    The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

    This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

    You can set the throughput mode for the file system using the ThroughputMode parameter.

    After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

    This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

    " + "documentation":"

    Creates a new, empty file system. The operation requires a creation token in the request that Amazon EFS uses to ensure idempotent creation (calling the operation with same creation token has no effect). If a file system does not currently exist that is owned by the caller's Amazon Web Services account with the specified creation token, this operation does the following:

    • Creates a new, empty file system. The file system will have an Amazon EFS assigned ID, and an initial lifecycle state creating.

    • Returns with the description of the created file system.

    Otherwise, this operation returns a FileSystemAlreadyExists error with the ID of the existing file system.

    For basic use cases, you can use a randomly generated UUID for the creation token.

    The idempotent operation allows you to retry a CreateFileSystem call without risk of creating an extra file system. This can happen when an initial call fails in a way that leaves it uncertain whether or not a file system was actually created. An example might be that a transport level timeout occurred or your connection was reset. As long as you use the same creation token, if the initial call had succeeded in creating a file system, the client can learn of its existence from the FileSystemAlreadyExists error.

    For more information, see Creating a file system in the Amazon EFS User Guide.

    The CreateFileSystem call returns while the file system's lifecycle state is still creating. You can check the file system creation status by calling the DescribeFileSystems operation, which among other things returns the file system state.

    This operation accepts an optional PerformanceMode parameter that you choose for your file system. We recommend generalPurpose performance mode for most file systems. File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. The performance mode can't be changed after the file system has been created. For more information, see Amazon EFS performance modes.

    You can set the throughput mode for the file system using the ThroughputMode parameter.

    After the file system is fully created, Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on EC2 instances in your VPC by using the mount target. For more information, see Amazon EFS: How it Works.

    This operation requires permissions for the elasticfilesystem:CreateFileSystem action.

    File systems can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

    " }, "CreateMountTarget":{ "name":"CreateMountTarget", @@ -1275,7 +1275,7 @@ "members":{ "Status":{ "shape":"ReplicationStatus", - "documentation":"

    Describes the status of the destination Amazon EFS file system. If the status is ERROR, the destination file system in the replication configuration is in a failed state and is unrecoverable. To access the file system data, restore a backup of the failed file system to a new file system.

    " + "documentation":"

    Describes the status of the destination Amazon EFS file system.

    • The Paused state occurs as a result of opting out of the source or destination Region after the replication configuration was created. To resume replication for the file system, you need to again opt in to the Amazon Web Services Region. For more information, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference Guide.

    • The Error state occurs when either the source or the destination file system (or both) is in a failed state and is unrecoverable. For more information, see Monitoring replication status in the Amazon EFS User Guide. You must delete the replication configuration, and then restore the most recent backup of the failed file system (either the source or the destination) to a new file system.

    " }, "FileSystemId":{ "shape":"FileSystemId", diff --git a/services/eks/pom.xml b/services/eks/pom.xml index b482da99a756..02959770aed6 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index c7c74e5106cb..6a6fde9c0f96 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index c3e63b36dd7b..4ceb8f6711d4 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 86931535e16d..e170ddef49e1 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 444dd11ab3b2..1eca8a925a0d 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index e35fb7b167fb..c9562a7600a1 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 95ebef2d80cf..cdba59dc7aef 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index 2aef5ceba28e..54ee59664601 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 71839c4b5859..dc9725c376a7 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 92789d992330..c834da576b3b 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index e6d742b835c6..f6ae896f679b 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index f1f28b1c6012..877b4fa9f8e0 100644 --- a/services/eventbridge/pom.xml +++ 
b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index 22743709e7ee..523316f259bb 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index 985c91a7f7f4..685e77b205a7 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 63f0a303cb8a..3347edee49fe 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 252a57360019..7a7b76b0f542 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fis/pom.xml b/services/fis/pom.xml index 0a149e92a055..37b4dcdb06a6 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index fa455242b918..8b12706ffd81 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git 
a/services/forecast/pom.xml b/services/forecast/pom.xml index 8044a23a48ab..f785429cd204 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 62d59f8bf302..ba811347f3cd 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index f054efb12281..020b3f841081 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 54e5703c52d4..299f9e7a2477 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json b/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json index 283b29959239..ee67cf2679d9 100644 --- a/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/fsx/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,8 +8,8 @@ } }, "params": { - "UseFIPS": false, "Region": "af-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -21,8 +21,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -34,8 +34,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -47,8 +47,8 @@ } }, "params": { 
- "UseFIPS": false, "Region": "ap-northeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -60,8 +60,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -73,8 +73,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -86,8 +86,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -99,8 +99,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -112,8 +112,8 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -125,8 +125,8 @@ } }, "params": { - "UseFIPS": true, "Region": "ca-central-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -138,8 +138,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-central-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -151,8 +151,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -164,8 +164,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -177,8 +177,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -190,8 +190,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -203,8 +203,8 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, @@ -216,8 +216,8 @@ } }, "params": { - "UseFIPS": false, "Region": "me-south-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -229,8 +229,8 @@ } }, "params": { - "UseFIPS": false, "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -242,8 +242,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, 
"UseDualStack": false } }, @@ -255,8 +255,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -268,8 +268,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -281,8 +281,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -294,8 +294,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -307,8 +307,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -320,8 +320,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": false } }, @@ -333,8 +333,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": false } }, @@ -346,8 +346,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -359,8 +359,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -372,8 +372,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -385,8 +385,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -398,8 +398,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -411,8 +411,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -424,8 +424,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -437,8 +437,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -450,8 +450,8 @@ } }, "params": { - "UseFIPS": true, "Region": 
"us-gov-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -463,8 +463,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -476,8 +476,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -489,8 +489,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -502,8 +502,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -513,8 +513,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -526,8 +526,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-iso-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -537,8 +537,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -550,8 +550,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-iso-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -561,8 +561,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -574,8 +574,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -585,8 +585,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -598,8 +598,8 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, @@ -611,8 +611,8 @@ } }, "params": { - 
"UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -636,8 +636,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -648,8 +648,8 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index bb23540564b6..08c22d261a22 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -107,7 +107,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

    Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

    ", + "documentation":"

    Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

    ", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -304,7 +304,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

    ", + "documentation":"

    Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    ", "idempotent":true }, "DeleteFileCache":{ @@ -387,7 +387,8 @@ {"shape":"BadRequest"}, {"shape":"IncompatibleParameterError"}, {"shape":"InternalServerError"}, - {"shape":"VolumeNotFound"} + {"shape":"VolumeNotFound"}, + {"shape":"ServiceLimitExceeded"} ], "documentation":"

    Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.

    " }, @@ -423,7 +424,7 @@ {"shape":"InvalidDataRepositoryType"}, {"shape":"InternalServerError"} ], - "documentation":"

    Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types.

    You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

    ", + "documentation":"

    Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

    ", "idempotent":true }, "DescribeDataRepositoryTasks":{ @@ -650,7 +651,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.

    ", + "documentation":"

    Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    ", "idempotent":true }, "UpdateFileCache":{ @@ -724,7 +725,7 @@ {"shape":"StorageVirtualMachineNotFound"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

    Updates an Amazon FSx for ONTAP storage virtual machine (SVM).

    " + "documentation":"

    Updates an FSx for ONTAP storage virtual machine (SVM).

    " }, "UpdateVolume":{ "name":"UpdateVolume", @@ -1615,11 +1616,11 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

    Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

    • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).

    • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

    For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

    " + "documentation":"

    Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region. Valid values are the following:

    • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MBps. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).

    • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MBps using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

    For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

    " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

    • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

    You pay for additional throughput capacity that you provision.

    " + "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    You pay for additional throughput capacity that you provision.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, @@ -2008,7 +2009,7 @@ }, "DNSName":{ "type":"string", - "documentation":"

    The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.

    ", + "documentation":"

    The file system's DNS name. You can mount your file system using its DNS name.

    ", "max":275, "min":16, "pattern":"^((fs|fc)i?-[0-9a-f]{8,}\\..{4,253})$" @@ -2080,7 +2081,7 @@ "documentation":"

    The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

    " } }, - "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types.

    " + "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    " }, "DataRepositoryAssociationId":{ "type":"string", @@ -3007,14 +3008,14 @@ "members":{ "Mode":{ "shape":"DiskIopsConfigurationMode", - "documentation":"

    Specifies whether the number of IOPS for the file system is using the system default (AUTOMATIC) or was provisioned by the customer (USER_PROVISIONED).

    " + "documentation":"

    Specifies whether the file system is using the AUTOMATIC setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it is using a USER_PROVISIONED value.

    " }, "Iops":{ "shape":"Iops", "documentation":"

    The total number of SSD IOPS provisioned for the file system.

    " } }, - "documentation":"

    The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).

    " + "documentation":"

    The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or FSx for OpenZFS file system. By default, Amazon FSx automatically provisions 3 IOPS per GB of storage capacity. You can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how it was provisioned, or the mode (by the customer or by Amazon FSx).

    " }, "DiskIopsConfigurationMode":{ "type":"string", @@ -4044,7 +4045,11 @@ "documentation":"

    (Multi-AZ only) The VPC route tables in which your file system's endpoints are created.

    " }, "ThroughputCapacity":{"shape":"MegabytesPerSecond"}, - "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"} + "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, + "FsxAdminPassword":{ + "shape":"AdminPassword", + "documentation":"

    You can use the fsxadmin user account to access the NetApp ONTAP CLI and REST API. The password value is always redacted in the response.

    " + } }, "documentation":"

    Configuration for the FSx for NetApp ONTAP file system.

    " }, @@ -4375,7 +4380,7 @@ }, "ProgressPercent":{ "type":"integer", - "documentation":"

    The current percent of progress of an asynchronous task.

    ", + "documentation":"

    Displays the current percent of progress of an asynchronous task.

    ", "max":100, "min":0 }, @@ -4618,25 +4623,37 @@ "documentation":"

    A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

    " } }, - "documentation":"

    The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx with your self-managed Microsoft Active Directory or Managing SVMs.

    " + "documentation":"

    The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an FSx for ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see Using Amazon FSx for Windows with your self-managed Microsoft Active Directory or Managing FSx for ONTAP SVMs.

    " }, "SelfManagedActiveDirectoryConfigurationUpdates":{ "type":"structure", "members":{ "UserName":{ "shape":"DirectoryUserName", - "documentation":"

    The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

    " + "documentation":"

    Specifies the updated user name for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.

    This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

    " }, "Password":{ "shape":"DirectoryPassword", - "documentation":"

    The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.

    " + "documentation":"

    Specifies the updated password for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.

    " }, "DnsIps":{ "shape":"DnsIps", - "documentation":"

    A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.

    " + "documentation":"

    A list of up to three DNS server or domain controller IP addresses in your self-managed AD domain.

    " + }, + "DomainName":{ + "shape":"ActiveDirectoryFullyQualifiedName", + "documentation":"

    Specifies an updated fully qualified domain name of your self-managed AD configuration.

    " + }, + "OrganizationalUnitDistinguishedName":{ + "shape":"OrganizationalUnitDistinguishedName", + "documentation":"

    Specifies an updated fully qualified distinguished name of the organization unit within your self-managed AD.

    " + }, + "FileSystemAdministratorsGroup":{ + "shape":"FileSystemAdministratorsGroupName", + "documentation":"

    Specifies the updated name of the self-managed AD domain group whose members are granted administrative privileges for the Amazon FSx resource.

    " } }, - "documentation":"

    The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.

    " + "documentation":"

    Specifies changes you are making to the self-managed Microsoft Active Directory (AD) configuration to which an FSx for Windows File Server file system or an FSx for ONTAP SVM is joined.

    " }, "ServiceLimit":{ "type":"string", @@ -4806,13 +4823,13 @@ }, "StorageCapacity":{ "type":"integer", - "documentation":"

    The storage capacity for your Amazon FSx file system, in gibibytes.

    ", + "documentation":"

    Specifies the file system's storage capacity, in gibibytes (GiB).

    ", "max":2147483647, "min":0 }, "StorageType":{ "type":"string", - "documentation":"

    The storage type for your Amazon FSx file system.

    ", + "documentation":"

    Specifies the file system's storage type.

    ", "enum":[ "SSD", "HDD" @@ -4980,11 +4997,11 @@ "members":{ "NetBiosName":{ "shape":"NetBiosAlias", - "documentation":"

    The NetBIOS name of the Active Directory computer object that is joined to your SVM.

    " + "documentation":"

    The NetBIOS name of the AD computer object to which the SVM is joined.

    " }, "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryAttributes"} }, - "documentation":"

    Describes the configuration of the Microsoft Active Directory (AD) directory to which the Amazon FSx for ONTAP storage virtual machine (SVM) is joined. Pleae note, account credentials are not returned in the response payload.

    " + "documentation":"

    Describes the Microsoft Active Directory (AD) directory configuration to which the FSx for ONTAP storage virtual machine (SVM) is joined. Note that account credentials are not returned in the response payload.

    " }, "SvmEndpoint":{ "type":"structure", @@ -5261,16 +5278,16 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "FsxAdminPassword":{ "shape":"AdminPassword", - "documentation":"

    The ONTAP administrative password for the fsxadmin user.

    " + "documentation":"

    Update the password for the fsxadmin user by entering a new password. You use the fsxadmin user to access the NetApp ONTAP CLI and REST API to manage your file system resources. For more information, see Managing resources using NetApp Application.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{ "shape":"DiskIopsConfiguration", - "documentation":"

    The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned.

    " + "documentation":"

    The SSD IOPS (input output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned. For more information, see Updating SSD storage capacity and IOPS.

    " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.

    " + "documentation":"

    Enter a new value to change the amount of throughput capacity for the file system. Throughput capacity is measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps. For more information, see Managing throughput capacity in the FSx for ONTAP User Guide.

    " }, "AddRouteTableIds":{ "shape":"RouteTableIds", @@ -5320,7 +5337,7 @@ }, "StorageCapacity":{ "shape":"StorageCapacity", - "documentation":"

    Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.

    You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.

    For Lustre file systems, the storage capacity target value can be the following:

    • For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.

    • For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.

    • For SCRATCH_1 file systems, you can't increase the storage capacity.

    For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide.

    For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide.

    For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.

    For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

    " + "documentation":"

    Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating.

    You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.

    For Lustre file systems, the storage capacity target value can be the following:

    • For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.

    • For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.

    • For SCRATCH_1 file systems, you can't increase the storage capacity.

    For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide.

    For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide.

    For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide.

    For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

    " }, "WindowsConfiguration":{ "shape":"UpdateFileSystemWindowsConfiguration", @@ -5330,7 +5347,7 @@ "OntapConfiguration":{"shape":"UpdateFileSystemOntapConfiguration"}, "OpenZFSConfiguration":{ "shape":"UpdateFileSystemOpenZFSConfiguration", - "documentation":"

    The configuration updates for an Amazon FSx for OpenZFS file system.

    " + "documentation":"

    The configuration updates for an FSx for OpenZFS file system.

    " } }, "documentation":"

    The request object for the UpdateFileSystem operation.

    " @@ -5479,7 +5496,7 @@ "members":{ "ActiveDirectoryConfiguration":{ "shape":"UpdateSvmActiveDirectoryConfiguration", - "documentation":"

    Updates the Microsoft Active Directory (AD) configuration for an SVM that is joined to an AD.

    " + "documentation":"

    Specifies updates to an SVM's Microsoft Active Directory (AD) configuration.

    " }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -5491,7 +5508,7 @@ }, "SvmAdminPassword":{ "shape":"AdminPassword", - "documentation":"

    Enter a new SvmAdminPassword if you are updating it.

    " + "documentation":"

    Specifies a new SvmAdminPassword.

    " } } }, @@ -5504,9 +5521,13 @@ "UpdateSvmActiveDirectoryConfiguration":{ "type":"structure", "members":{ - "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryConfigurationUpdates"} + "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryConfigurationUpdates"}, + "NetBiosName":{ + "shape":"NetBiosAlias", + "documentation":"

    Specifies an updated NetBIOS name of the AD computer object NetBiosName to which an SVM is joined.

    " + } }, - "documentation":"

    Updates the Microsoft Active Directory (AD) configuration of an SVM joined to an AD. Please note, account credentials are not returned in the response payload.

    " + "documentation":"

    Specifies updates to an FSx for ONTAP storage virtual machine's (SVM) Microsoft Active Directory (AD) configuration. Note that account credentials are not returned in the response payload.

    " }, "UpdateVolumeRequest":{ "type":"structure", diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 2cea676bb3f3..8fffdbaec355 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamesparks/pom.xml b/services/gamesparks/pom.xml index 289865751093..395a9c57de9e 100644 --- a/services/gamesparks/pom.xml +++ b/services/gamesparks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT gamesparks AWS Java SDK :: Services :: Game Sparks diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index d3ab71b9b460..e1e72ab1388f 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index b7e9a1665345..18f4ce3e05dd 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 5a728708355a..58166d4e1c99 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 935d58250d24..4e38fc15813b 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -8223,6 +8223,10 @@ "DatabaseName":{ "shape":"NameString", 
"documentation":"

    The name of the catalog database.

    " + }, + "Region":{ + "shape":"NameString", + "documentation":"

    Region of the target database.

    " } }, "documentation":"

    A structure that describes a target database for resource linking.

    " @@ -18740,6 +18744,10 @@ "Name":{ "shape":"NameString", "documentation":"

    The name of the target table.

    " + }, + "Region":{ + "shape":"NameString", + "documentation":"

    Region of the target table.

    " } }, "documentation":"

    A structure that describes a target table for resource linking.

    " diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 58314d244230..230a8561c14e 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 36aa3ad5262e..9f978d1a42a6 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index 8e419d666298..e473f41cfafe 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index e3f65f5262ca..d423d34787d1 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index dbf362cf95ae..e66209ea7c97 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/service-2.json b/services/guardduty/src/main/resources/codegen-resources/service-2.json index 6746a970260c..44ff57eed1e9 100644 --- a/services/guardduty/src/main/resources/codegen-resources/service-2.json +++ b/services/guardduty/src/main/resources/codegen-resources/service-2.json @@ -117,7 +117,7 @@ {"shape":"BadRequestException"}, 
{"shape":"InternalServerErrorException"} ], - "documentation":"

    Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated member accounts either by invitation or through an organization.

    When using Create Members as an organizations delegated administrator this action will enable GuardDuty in the added member accounts, with the exception of the organization delegated administrator account, which must enable GuardDuty prior to being added as a member.

    If you are adding accounts by invitation, use this action after GuardDuty has bee enabled in potential member accounts and before using InviteMembers.

    " + "documentation":"

    Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated member accounts either by invitation or through an organization.

    As a delegated administrator, using CreateMembers will enable GuardDuty in the added member accounts, with the exception of the organization delegated administrator account. A delegated administrator must enable GuardDuty prior to being added as a member.

    If you are adding accounts by invitation, before using InviteMembers, use CreateMembers after GuardDuty has been enabled in potential member accounts.

    If you disassociate a member from a GuardDuty delegated administrator, the member account details obtained from this API, including the associated email addresses, will be retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

    " }, "CreatePublishingDestination":{ "name":"CreatePublishingDestination", @@ -357,7 +357,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Disassociates the current GuardDuty member account from its administrator account.

    With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account.

    " + "documentation":"

    Disassociates the current GuardDuty member account from its administrator account.

    When you disassociate an invited member from a GuardDuty delegated administrator, the member account details obtained from the CreateMembers API, including the associated email addresses, are retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

    With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account.

    " }, "DisassociateFromMasterAccount":{ "name":"DisassociateFromMasterAccount", @@ -372,7 +372,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Disassociates the current GuardDuty member account from its administrator account.

    ", + "documentation":"

    Disassociates the current GuardDuty member account from its administrator account.

    When you disassociate an invited member from a GuardDuty delegated administrator, the member account details obtained from the CreateMembers API, including the associated email addresses, are retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

    ", "deprecated":true, "deprecatedMessage":"This operation is deprecated, use DisassociateFromAdministratorAccount instead" }, @@ -389,7 +389,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Disassociates GuardDuty member accounts (to the current administrator account) specified by the account IDs.

    With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disassociate a member account before removing them from your Amazon Web Services organization.

    " + "documentation":"

    Disassociates GuardDuty member accounts (from the current administrator account) specified by the account IDs.

    When you disassociate an invited member from a GuardDuty delegated administrator, the member account details obtained from the CreateMembers API, including the associated email addresses, are retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API.

    With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disassociate a member account before removing them from your Amazon Web Services organization.

    " }, "EnableOrganizationAdminAccount":{ "name":"EnableOrganizationAdminAccount", @@ -646,7 +646,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

    Invites other Amazon Web Services accounts (created as members of the current Amazon Web Services account by CreateMembers) to enable GuardDuty, and allow the current Amazon Web Services account to view and manage these accounts' findings on their behalf as the GuardDuty administrator account.

    " + "documentation":"

    Invites Amazon Web Services accounts to become members of an organization administered by the Amazon Web Services account that invokes this API. If you are using Amazon Web Services Organizations to manage your GuardDuty environment, this step is not needed. For more information, see Managing accounts with Amazon Web Services Organizations.

    To invite Amazon Web Services accounts, the first step is to ensure that GuardDuty has been enabled in the potential member accounts. You can now invoke this API to add accounts by invitation. The invited accounts can either accept or decline the invitation from their GuardDuty accounts. Each invited Amazon Web Services account can choose to accept the invitation from only one Amazon Web Services account. For more information, see Managing GuardDuty accounts by invitation.

    After the invite has been accepted and you choose to disassociate a member account (by using DisassociateMembers) from your account, the details of the member account obtained by invoking CreateMembers, including the associated email addresses, will be retained. This is done so that you can invoke InviteMembers without the need to invoke CreateMembers again. To remove the details associated with a member account, you must also invoke DeleteMembers.

    " }, "ListCoverage":{ "name":"ListCoverage", @@ -3096,7 +3096,7 @@ "members":{ "Domain":{ "shape":"String", - "documentation":"

    The domain information for the API request.

    ", + "documentation":"

    The domain information for the DNS query.

    ", "locationName":"domain" }, "Protocol":{ diff --git a/services/health/pom.xml b/services/health/pom.xml index f2e42f78d02f..79de7bef6cd6 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 37cd0ccb9750..9c62801210dc 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 6965649450ba..ee982dd1bb7d 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 04d31bc615c8..39a2cc3a5aee 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/service-2.json b/services/iam/src/main/resources/codegen-resources/service-2.json index 99fa7fcae834..bc883919a5f3 100644 --- a/services/iam/src/main/resources/codegen-resources/service-2.json +++ b/services/iam/src/main/resources/codegen-resources/service-2.json @@ -1679,7 +1679,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

    Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see Working with roles.

    IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a role, see GetRole.

    You can paginate the results using the MaxItems and Marker parameters.

    " + "documentation":"

    Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see Working with roles.

    IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

    • PermissionsBoundary

    • RoleLastUsed

    • Tags

    To view all of the information for a role, see GetRole.

    You can paginate the results using the MaxItems and Marker parameters.

    " }, "ListSAMLProviderTags":{ "name":"ListSAMLProviderTags", @@ -1846,7 +1846,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

    Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list.

    IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a user, see GetUser.

    You can paginate the results using the MaxItems and Marker parameters.

    " + "documentation":"

    Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list.

    IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

    • PermissionsBoundary

    • Tags

    To view all of the information for a user, see GetUser.

    You can paginate the results using the MaxItems and Marker parameters.

    " }, "ListVirtualMFADevices":{ "name":"ListVirtualMFADevices", diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 18c23cc5e365..62e94270d340 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 5216787428ec..0a641742d9ff 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json b/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json index ed29f944ecff..bcfa0a4ab2f7 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - 
"Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -188,9 +188,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -201,9 +201,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -212,9 +212,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": 
"us-iso-east-1" + "UseDualStack": true } }, { @@ -225,9 +225,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -236,9 +236,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -249,9 +249,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -260,9 +260,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -273,9 +273,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -286,9 +286,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -300,8 +300,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -311,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -323,9 +323,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git 
a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json index dee0502afd3c..3f1b55a9e8e0 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json @@ -3501,7 +3501,7 @@ }, "dateNextRun":{ "shape":"DateTime", - "documentation":"

    This is no longer supported, and does not return a value.

    " + "documentation":"

    The next date when the pipeline is scheduled to run.

    " }, "tags":{ "shape":"TagMap", diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 137a583fd759..4abc90cb813f 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index 63eed478cd0c..a2faa9c9dad4 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index b8617e93b342..54bdf34c1f3a 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/iot/pom.xml b/services/iot/pom.xml index b16e5bc39aa2..6d1edb104635 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index 14a7ab09409d..e8c023983908 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 1d21eb16832c..27a1e8e0f829 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iot1clickprojects AWS Java SDK :: 
Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index aeb66c63bc8f..41840d786873 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 473592dd1cbf..073c0dcdf65c 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 50f087f9106f..f291879be3f5 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index 062d8b24f0c4..b67b1c4b74f1 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 77652bf2dd25..9b4c0952143c 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index 088d5c8d7a2b..1ebe448bf694 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotfleethub AWS Java SDK :: 
Services :: Io T Fleet Hub diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index d2c5fe6fcfad..63d5d6ad48e7 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index c23f51aff7ee..239645011130 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotroborunner/pom.xml b/services/iotroborunner/pom.xml index a708a43be363..d2d1d7e1d551 100644 --- a/services/iotroborunner/pom.xml +++ b/services/iotroborunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotroborunner AWS Java SDK :: Services :: IoT Robo Runner diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 4fda0129eda1..dce351410324 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 620767360e42..dbb4e6d91170 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index dfcdda738734..e78d561525d4 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index 97792d9ec032..39a058523409 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index c72cc6485324..aeecf83b9978 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 099ca7a10c71..6e0c392e3f04 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 3a16c251dfdb..5a67423e526f 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 22b524e4dd0d..bb0db9cddb76 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 03286bb03b6e..669ba4886075 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafkaconnect/pom.xml 
b/services/kafkaconnect/pom.xml index 50d32e59f626..c9a7fa717835 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index 5a2c0a63e6bc..9bb14dd61df8 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index 890b61ecb9ca..1b2b2e031b57 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index 85272f5d48c7..590ad9340b4f 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 3896146fc948..5d14e36fce15 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index f35848648bda..5fb4c6d66113 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 
0cd5b7239a4e..3e99a121bad5 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index bc805751db2d..373f97520f55 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 3719c3881309..d29133b97715 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 2d13ffd47c0a..f2cadeedb7c0 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 89a65719197d..c7c5ee170f5c 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index ee5afdf39bbe..f574bda29b91 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 24f250a8499a..51011d519286 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index 971fd4b6965c..bd3b74984c37 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 38ae06d612ba..231e6f3dfaad 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index c8d8eb2ccd8a..b8ddb938be49 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -615,7 +615,8 @@ {"shape":"KMSNotFoundException"}, {"shape":"InvalidRuntimeException"}, {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotReadyException"} + {"shape":"ResourceNotReadyException"}, + {"shape":"RecursiveInvocationException"} ], "documentation":"

    Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.

    For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

    When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.

    For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

    The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

    For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

    This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

    " }, @@ -675,7 +676,8 @@ {"shape":"KMSNotFoundException"}, {"shape":"InvalidRuntimeException"}, {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotReadyException"} + {"shape":"ResourceNotReadyException"}, + {"shape":"RecursiveInvocationException"} ], "documentation":"

    Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.

    This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

    " }, @@ -5029,6 +5031,22 @@ "max":1, "min":1 }, + "RecursiveInvocationException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

    The exception type.

    " + }, + "Message":{ + "shape":"String", + "documentation":"

    The exception message.

    " + } + }, + "documentation":"

    Lambda has detected your function being invoked in a recursive loop with other Amazon Web Services resources and stopped your function's invocation.

    ", + "error":{"httpStatusCode":400}, + "exception":true + }, "RemoveLayerVersionPermissionRequest":{ "type":"structure", "required":[ diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 621590c33cfd..d809932f50a1 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 327a168f6b16..6e11ca9231a1 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index 980fe9fa12f1..75d3a4bd9a4c 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index d4fcb571281e..6f92cdcdd380 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index f34b9959aad2..6752170c2631 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index d927a89ce53b..f72124d71803 100644 --- 
a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index 72245b3749d1..7d975e19284a 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 89a3ba999840..bfe26936a05c 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json b/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json index ef0830464521..0b3e73dea42e 100644 --- a/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/lightsail/src/main/resources/codegen-resources/endpoint-tests.json @@ -325,6 +325,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -338,6 +349,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and 
DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -351,6 +373,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -364,6 +397,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -427,6 +471,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/lightsail/src/main/resources/codegen-resources/service-2.json b/services/lightsail/src/main/resources/codegen-resources/service-2.json index fba5ddf088e2..dbebbe0c225b 100644 --- a/services/lightsail/src/main/resources/codegen-resources/service-2.json +++ b/services/lightsail/src/main/resources/codegen-resources/service-2.json @@ -1311,7 +1311,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

    Returns information about one or more Amazon Lightsail SSL/TLS certificates.

    To get a summary of a certificate, ommit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

    " + "documentation":"

    Returns information about one or more Amazon Lightsail SSL/TLS certificates.

    To get a summary of a certificate, omit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

    " }, "GetCloudFormationStackRecords":{ "name":"GetCloudFormationStackRecords", @@ -3960,7 +3960,7 @@ "documentation":"

    The support code. Include this code in your email to support when you have questions about your Lightsail certificate. This code enables our support team to look up your Lightsail information more easily.

    " } }, - "documentation":"

    Describes the full details of an Amazon Lightsail SSL/TLS certificate.

    To get a summary of a certificate, use the GetCertificates action and ommit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

    " + "documentation":"

    Describes the full details of an Amazon Lightsail SSL/TLS certificate.

    To get a summary of a certificate, use the GetCertificates action and omit includeCertificateDetails from your request. The response will include only the certificate Amazon Resource Name (ARN), certificate name, domain name, and tags.

    " }, "CertificateDomainValidationStatus":{ "type":"string", @@ -7139,6 +7139,10 @@ "certificateName":{ "shape":"CertificateName", "documentation":"

    The name for the certificate for which to return information.

    When omitted, the response includes all of your certificates in the Amazon Web Services Region where the request is made.

    " + }, + "pageToken":{ + "shape":"string", + "documentation":"

    The token to advance to the next page of results from your request.

    To get a page token, perform an initial GetCertificates request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.

    " } } }, @@ -7148,6 +7152,10 @@ "certificates":{ "shape":"CertificateSummaryList", "documentation":"

    An object that describes certificates.

    " + }, + "nextPageToken":{ + "shape":"string", + "documentation":"

    If NextPageToken is returned, there are more results available. The value of NextPageToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

    " } } }, @@ -7376,11 +7384,11 @@ }, "startTime":{ "shape":"IsoDate", - "documentation":"

    The cost estimate start time.

    Constraints:

    • Specified in Coordinated Universal Time (UTC).

    • Specified in the Unix time format.

      For example, if you wish to use a start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the start time.

    You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

    " + "documentation":"

    The cost estimate start time.

    Constraints:

    • Specified in Coordinated Universal Time (UTC).

    • Specified in the Unix time format.

      For example, if you want to use a start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the start time.

    You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

    " }, "endTime":{ "shape":"IsoDate", - "documentation":"

    The cost estimate end time.

    Constraints:

    • Specified in Coordinated Universal Time (UTC).

    • Specified in the Unix time format.

      For example, if you wish to use an end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end time.

    You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

    " + "documentation":"

    The cost estimate end time.

    Constraints:

    • Specified in Coordinated Universal Time (UTC).

    • Specified in the Unix time format.

      For example, if you want to use an end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end time.

    You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

    " } } }, diff --git a/services/location/pom.xml b/services/location/pom.xml index 124139491707..1684dc59ddeb 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/location/src/main/resources/codegen-resources/service-2.json b/services/location/src/main/resources/codegen-resources/service-2.json index c5dd73f14f9a..e58db3bed5ba 100644 --- a/services/location/src/main/resources/codegen-resources/service-2.json +++ b/services/location/src/main/resources/codegen-resources/service-2.json @@ -144,7 +144,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Uploads position update data for one or more devices to a tracker resource. Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.

    Position updates are handled based on the PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, updates are evaluated against linked geofence collections, and location data is stored at a maximum of one position per 30 second interval. If your update frequency is more often than every 30 seconds, only one update per 30 seconds is stored for each unique device ID.

    When PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than 30 m (98.4 ft).

    When PositionFiltering is set to AccuracyBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than the measured accuracy. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate.

    ", + "documentation":"

    Uploads position update data for one or more devices to a tracker resource (up to 10 devices per batch). Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.

    Position updates are handled based on the PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, updates are evaluated against linked geofence collections, and location data is stored at a maximum of one position per 30 second interval. If your update frequency is more often than every 30 seconds, only one update per 30 seconds is stored for each unique device ID.

    When PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than 30 m (98.4 ft).

    When PositionFiltering is set to AccuracyBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than the measured accuracy. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is neither stored nor evaluated if the device has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate.

    ", "endpoint":{"hostPrefix":"tracking."} }, "CalculateRoute":{ @@ -1200,7 +1200,7 @@ "ApiKeyRestrictionsAllowActionsList":{ "type":"list", "member":{"shape":"ApiKeyAction"}, - "max":5, + "max":7, "min":1 }, "ApiKeyRestrictionsAllowReferersList":{ @@ -1574,6 +1574,10 @@ "shape":"Id", "documentation":"

    The identifier for the geofence to be stored in a given geofence collection.

    " }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

    Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.

    " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

    Contains the details of the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error.

    Each geofence polygon can have a maximum of 1,000 vertices.

    " @@ -1667,7 +1671,7 @@ }, "Updates":{ "shape":"BatchUpdateDevicePositionRequestUpdatesList", - "documentation":"

    Contains the position update details for each device.

    " + "documentation":"

    Contains the position update details for each device, up to 10 devices.

    " } } }, @@ -2996,6 +3000,12 @@ "type":"double", "box":true }, + "FilterPlaceCategoryList":{ + "type":"list", + "member":{"shape":"PlaceCategory"}, + "max":5, + "min":1 + }, "GeoArn":{ "type":"string", "max":1600, @@ -3167,6 +3177,10 @@ "shape":"Id", "documentation":"

    The geofence identifier.

    " }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

    Contains additional user-defined properties stored with the geofence. An array of key-value pairs.

    " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

    Contains the geofence geometry details describing a polygon or a circle.

    " @@ -3734,6 +3748,10 @@ "shape":"Id", "documentation":"

    The geofence identifier.

    " }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

    Contains additional user-defined properties stored with the geofence. An array of key-value pairs.

    " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

    Contains the geofence geometry details describing a polygon or a circle.

    " @@ -4273,6 +4291,10 @@ "shape":"String", "documentation":"

    The numerical portion of an address, such as a building number.

    " }, + "Categories":{ + "shape":"PlaceCategoryList", + "documentation":"

    The Amazon Location categories that describe this Place.

    For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

    " + }, "Country":{ "shape":"String", "documentation":"

    A country/region specified using ISO 3166 3-digit country/region code. For example, CAN.

    " @@ -4310,21 +4332,36 @@ "shape":"String", "documentation":"

    A county, or an area that's part of a larger region. For example, Metro Vancouver.

    " }, + "SupplementalCategories":{ + "shape":"PlaceSupplementalCategoryList", + "documentation":"

    Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

    " + }, "TimeZone":{ "shape":"TimeZone", - "documentation":"

    The time zone in which the Place is located. Returned only when using HERE as the selected partner.

    " + "documentation":"

    The time zone in which the Place is located. Returned only when using HERE or Grab as the selected partner.

    " }, "UnitNumber":{ "shape":"String", - "documentation":"

    For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

    Returned only for a place index that uses Esri as a data provider. Is not returned for SearchPlaceIndexForPosition.

    " + "documentation":"

    For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

    Returned only for a place index that uses Esri or Grab as a data provider. Is not returned for SearchPlaceIndexForPosition.

    " }, "UnitType":{ "shape":"String", - "documentation":"

    For addresses with a UnitNumber, the type of unit. For example, Apartment.

    " + "documentation":"

    For addresses with a UnitNumber, the type of unit. For example, Apartment.

    Returned only for a place index that uses Esri as a data provider.

    " } }, "documentation":"

    Contains details about addresses or points of interest that match the search criteria.

    Not all details are included with all responses. Some details may only be returned by specific data partners.

    " }, + "PlaceCategory":{ + "type":"string", + "max":35, + "min":0 + }, + "PlaceCategoryList":{ + "type":"list", + "member":{"shape":"PlaceCategory"}, + "max":10, + "min":1 + }, "PlaceGeometry":{ "type":"structure", "members":{ @@ -4341,6 +4378,17 @@ "max":50, "min":1 }, + "PlaceSupplementalCategory":{ + "type":"string", + "max":35, + "min":0 + }, + "PlaceSupplementalCategoryList":{ + "type":"list", + "member":{"shape":"PlaceSupplementalCategory"}, + "max":10, + "min":1 + }, "Position":{ "type":"list", "member":{"shape":"Double"}, @@ -4419,6 +4467,10 @@ "location":"uri", "locationName":"GeofenceId" }, + "GeofenceProperties":{ + "shape":"PropertyMap", + "documentation":"

    Specifies additional user-defined properties to store with the Geofence. An array of key-value pairs.

    " + }, "Geometry":{ "shape":"GeofenceGeometry", "documentation":"

    Contains the details to specify the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error.

    Each geofence polygon can have a maximum of 1,000 vertices.

    " @@ -4577,9 +4629,17 @@ "type":"structure", "required":["Text"], "members":{ + "Categories":{ + "shape":"PlaceCategoryList", + "documentation":"

    The Amazon Location categories that describe the Place.

    For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

    " + }, "PlaceId":{ "shape":"PlaceId", - "documentation":"

    The unique identifier of the place. You can use this with the GetPlace operation to find the place again later.

    For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

    " + "documentation":"

    The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place.

    The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID.

    For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

    " + }, + "SupplementalCategories":{ + "shape":"PlaceSupplementalCategoryList", + "documentation":"

    Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories.

    " }, "Text":{ "shape":"String", @@ -4715,6 +4775,10 @@ "shape":"BoundingBox", "documentation":"

    An optional parameter that limits the search results by returning only suggestions within a specified bounding box.

    If provided, this parameter must contain a total of four consecutive numbers in two pairs. The first pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the southwest corner of the bounding box; the second pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the northeast corner of the bounding box.

    For example, [-12.7935, -37.4835, -12.0684, -36.9542] represents a bounding box where the southwest corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542.

    FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error.

    " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

    A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

    For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

    " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

    An optional parameter that limits the search results by returning only suggestions within the provided list of countries.

    • Use the ISO 3166 3-digit country code. For example, Australia uses three upper-case characters: AUS.

    " @@ -4787,6 +4851,10 @@ "shape":"BoundingBox", "documentation":"

    Contains the coordinates for the optional bounding box specified in the request.

    " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

    The optional category filter specified in the request.

    " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

    Contains the optional country filter specified in the request.

    " @@ -4821,6 +4889,10 @@ "shape":"BoundingBox", "documentation":"

    An optional parameter that limits the search results by returning only places that are within the provided bounding box.

    If provided, this parameter must contain a total of four consecutive numbers in two pairs. The first pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the southwest corner of the bounding box; the second pair of numbers represents the X and Y coordinates (longitude and latitude, respectively) of the northeast corner of the bounding box.

    For example, [-12.7935, -37.4835, -12.0684, -36.9542] represents a bounding box where the southwest corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542.

    FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error.

    " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

    A list of one or more Amazon Location categories to filter the returned places. If you include more than one category, the results will include results that match any of the categories listed.

    For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide.

    " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

    An optional parameter that limits the search results by returning only places that are in a specified list of countries.

    • Valid values include ISO 3166 3-digit country codes. For example, Australia uses three upper-case characters: AUS.

    " @@ -4887,6 +4959,10 @@ "shape":"BoundingBox", "documentation":"

    Contains the coordinates for the optional bounding box specified in the request.

    " }, + "FilterCategories":{ + "shape":"FilterPlaceCategoryList", + "documentation":"

    The optional category filter specified in the request.

    " + }, "FilterCountries":{ "shape":"CountryCodeList", "documentation":"

    Contains the optional country filter specified in the request.

    " diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 1e2f81eb5526..47bb5128b986 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index 355416987ee2..3859ff975773 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index bd75c2f7a9fc..28c2b3dafb3a 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index e4f799b1c51a..b7fc4572d0c3 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index 671a892a6867..13bf0643fc29 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index 16908e6cbc85..02e875ccb15d 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie2/pom.xml 
b/services/macie2/pom.xml index 0fdd5accd563..0006b71995e5 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index bb3ec8ce48c6..f1a94d4355ca 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index fc045eb38136..22ba42a1c947 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 7475d1b1cb8f..c989b35a9877 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index cb2555ed325a..5a51d84be0be 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 2a0bc6262e55..76df92aa025b 100644 --- a/services/marketplacemetering/pom.xml +++ 
b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index dd9e828b06ca..193de46ab591 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 6ec20a810358..a23ac5f651e4 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 58ad44611d5b..324acce6df21 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 medialive diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 53ecc42680a4..c255c0e6d116 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index 9cce160081b4..2edffa431e64 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 2dbd0df1a2d0..63e6a50cef33 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index 68f11bec4d9e..cd00d2dbe1f3 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 7726707bc544..86c8e5424137 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index f8123f8d0e01..da6b08850d85 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index f648136d884e..b2866cb588f0 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index f916db870fe3..37b3ac1fe066 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index b1244cd6f5e9..60a7195c28f8 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 
6eda77cbdd8c..a93111dacb0d 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index 1957169d67c5..a3a28ccfbe2d 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index 54da7e3da282..6639d784b354 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index d76c4df9b184..e8be037bfb25 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 6d259b6fa4e2..e09887733479 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index cc542a4f0521..1c1b7d061483 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 mq diff --git 
a/services/mturk/pom.xml b/services/mturk/pom.xml index 362eff7532a4..eea8bb0c7c77 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index cd7c794c23d7..7630a3e54cdb 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 2734176f578c..13457d90bee0 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index 170fb2177fd4..b7a92ebcaee7 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index e2e754b0211d..ede47945c752 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index 3e4f8208cb08..118a2ef8dffb 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/oam/pom.xml b/services/oam/pom.xml index 78ad194c3086..faf9fa1c7e9b 100644 --- a/services/oam/pom.xml +++ 
b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 94995d630e1d..1a0203509015 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index 0e5933d23c2d..012762652b18 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearch/src/main/resources/codegen-resources/service-2.json b/services/opensearch/src/main/resources/codegen-resources/service-2.json index 8198d4422e62..eb26e418a0c4 100644 --- a/services/opensearch/src/main/resources/codegen-resources/service-2.json +++ b/services/opensearch/src/main/resources/codegen-resources/service-2.json @@ -1763,7 +1763,11 @@ "members":{ "Endpoint":{ "shape":"Endpoint", - "documentation":"

    The endpoint of the remote domain.

    " + "documentation":"

    The Endpoint attribute cannot be modified.

    The endpoint of the remote domain. Applicable for VPC_ENDPOINT connection mode.

    " + }, + "CrossClusterSearch":{ + "shape":"CrossClusterSearchConnectionProperties", + "documentation":"

    The connection properties for cross cluster search.

    " } }, "documentation":"

    The connection properties of an outbound connection.

    " @@ -1880,6 +1884,10 @@ "ConnectionMode":{ "shape":"ConnectionMode", "documentation":"

    The connection mode.

    " + }, + "ConnectionProperties":{ + "shape":"ConnectionProperties", + "documentation":"

    The ConnectionProperties for the outbound connection.

    " } }, "documentation":"

    Container for the parameters to the CreateOutboundConnection operation.

    " @@ -1987,6 +1995,16 @@ } }, "CreatedAt":{"type":"timestamp"}, + "CrossClusterSearchConnectionProperties":{ + "type":"structure", + "members":{ + "SkipUnavailable":{ + "shape":"SkipUnavailableStatus", + "documentation":"

    Status of SkipUnavailable param for outbound connection.

    " + } + }, + "documentation":"

    Cross cluster search specific connection properties.

    " + }, "DeleteDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -2529,8 +2547,7 @@ }, "DescribePackagesFilterValues":{ "type":"list", - "member":{"shape":"DescribePackagesFilterValue"}, - "min":1 + "member":{"shape":"DescribePackagesFilterValue"} }, "DescribePackagesRequest":{ "type":"structure", @@ -5136,6 +5153,14 @@ "type":"string", "documentation":"

    The domain endpoint to which index and search requests are submitted. For example, search-imdb-movies-oopcnjfn6ugo.eu-west-1.es.amazonaws.com or doc-imdb-movies-oopcnjfn6u.eu-west-1.es.amazonaws.com.

    " }, + "SkipUnavailableStatus":{ + "type":"string", + "documentation":"

    Status of SkipUnavailable param for outbound connection.

    • ENABLED - The SkipUnavailable param is enabled for the connection.

    • DISABLED - The SkipUnavailable param is disabled for the connection.

    ", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "SlotList":{ "type":"list", "member":{"shape":"Long"} diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index 5f8ba0fe114f..a9aaf946a4f3 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 0cfeeeaa9b85..044803896635 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index f6547d6f8ada..f9bfb7a2c65e 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 54b813cfd1f0..1bc9110d25ea 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/osis/pom.xml b/services/osis/pom.xml index a174db6d7502..651c5815bb6a 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index 2a94d1712c5a..6ff89e356011 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 
2.20.90-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index bcf49b1b245d..96d0da92ee5e 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index 9b35e8f4378b..747c0529d7ce 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index a81b2b25b4ce..b5ffec7a6c51 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 8d5743f552a7..f101ac7624ba 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 85e7383b0d63..07c36be6d0e1 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 7c85db480744..ab700f23fce0 100644 --- a/services/personalizeruntime/pom.xml +++ 
b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 627fe9af362e..1a35516ce4cc 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index ad55d6807d67..c7faf716739f 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index dba3079cfd33..5eac3c77f4bf 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index a508015e48dd..f4236f00568f 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index a22ecf4f6806..07ebc58d7cb3 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index e0607cfbff25..498d06b752de 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index 8332f3c6f578..180a4af1c9e8 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/pom.xml b/services/pom.xml index f2f30d28f20a..0ee00979dd80 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT services AWS Java SDK :: Services @@ -367,6 +367,8 @@ mediapackagev2 paymentcryptographydata paymentcryptography + codegurusecurity + verifiedpermissions The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index ed45f1482f48..8d7bc9854b4f 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json b/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json index 6d77c402ab9b..db676384e56c 100644 --- a/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/pricing/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": 
{ + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -164,9 +164,20 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -177,9 +188,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - 
"Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -190,9 +212,20 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -203,9 +236,20 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -216,9 +260,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -229,9 +273,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -254,9 +298,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -266,11 +310,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", 
"UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/pricing/src/main/resources/codegen-resources/service-2.json b/services/pricing/src/main/resources/codegen-resources/service-2.json index 1b648c420ccb..699d05c1b3df 100644 --- a/services/pricing/src/main/resources/codegen-resources/service-2.json +++ b/services/pricing/src/main/resources/codegen-resources/service-2.json @@ -23,10 +23,10 @@ "input":{"shape":"DescribeServicesRequest"}, "output":{"shape":"DescribeServicesResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    Returns the metadata for one service or a list of the metadata for all services. Use this without a service code to get the service codes for all services. Use it with a service code, such as AmazonEC2, to get information specific to that service, such as the attribute names available for that service. For example, some of the attribute names available for EC2 are volumeType, maxIopsVolume, operation, locationType, and instanceCapacity10xlarge.

    " @@ -40,10 +40,10 @@ "input":{"shape":"GetAttributeValuesRequest"}, "output":{"shape":"GetAttributeValuesResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.

    " @@ -57,10 +57,10 @@ "input":{"shape":"GetPriceListFileUrlRequest"}, "output":{"shape":"GetPriceListFileUrlResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"InternalErrorException"} ], "documentation":"

    This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

    This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response.

    " }, @@ -73,10 +73,10 @@ "input":{"shape":"GetProductsRequest"}, "output":{"shape":"GetProductsResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalErrorException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

    Returns a list of all products that match the filter criteria.

    " @@ -90,12 +90,12 @@ "input":{"shape":"ListPriceListsRequest"}, "output":{"shape":"ListPriceListsResponse"}, "errors":[ - {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, - {"shape":"NotFoundException"}, {"shape":"InvalidNextTokenException"}, - {"shape":"ExpiredNextTokenException"}, - {"shape":"AccessDeniedException"} + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalErrorException"}, + {"shape":"ExpiredNextTokenException"} ], "documentation":"

    This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

    This returns a list of Price List references that the requester is authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API.

    " } @@ -129,12 +129,13 @@ }, "BoxedInteger":{ "type":"integer", + "box":true, "max":100, "min":1 }, "CurrencyCode":{ "type":"string", - "pattern":"^[A-Z]{3}$" + "pattern":"[A-Z]{3}" }, "DescribeServicesRequest":{ "type":"structure", @@ -153,8 +154,7 @@ }, "MaxResults":{ "shape":"BoxedInteger", - "documentation":"

    The maximum number of results that you want returned in the response.

    ", - "box":true + "documentation":"

    The maximum number of results that you want returned in the response.

    " } } }, @@ -245,8 +245,7 @@ }, "MaxResults":{ "shape":"BoxedInteger", - "documentation":"

    The maximum number of results to return in response.

    ", - "box":true + "documentation":"

    The maximum number of results to return in response.

    " } } }, @@ -311,8 +310,7 @@ }, "MaxResults":{ "shape":"BoxedInteger", - "documentation":"

    The maximum number of results to return in the response.

    ", - "box":true + "documentation":"

    The maximum number of results to return in the response.

    " } } }, @@ -339,7 +337,8 @@ "Message":{"shape":"errorMessage"} }, "documentation":"

    An error on the server occurred during the processing of your request. Try again later.

    ", - "exception":true + "exception":true, + "fault":true }, "InvalidNextTokenException":{ "type":"structure", @@ -406,6 +405,7 @@ }, "MaxResults":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -443,13 +443,12 @@ "type":"string", "max":2048, "min":18, - "pattern":"^arn:.+:pricing::.*:price-list/.{1,255}/.{1,32}/[A-Z]{3}/[0-9]{14}/[^/]*$" + "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9_/.-]{1,1023}" }, - "PriceListJsonItem":{"type":"string"}, "PriceListJsonItems":{ "type":"list", "member":{ - "shape":"PriceListJsonItem", + "shape":"SynthesizedJsonPriceListJsonItem", "jsonvalue":true } }, @@ -487,7 +486,8 @@ "member":{"shape":"Service"} }, "String":{"type":"string"}, + "SynthesizedJsonPriceListJsonItem":{"type":"string"}, "errorMessage":{"type":"string"} }, - "documentation":"

    Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to build cost control and scenario planning tools, reconcile billing data, forecast future spend for budgeting purposes, and provide cost benefit analysis that compare your internal workloads with Amazon Web Services.

    Use GetServices without a service code to retrieve the service codes for all AWS services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

    Service Endpoint

    Amazon Web Services Price List service API provides the following two endpoints:

    • https://api.pricing.us-east-1.amazonaws.com

    • https://api.pricing.ap-south-1.amazonaws.com

    " + "documentation":"

    The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following:

    • Build cost control and scenario planning tools

    • Reconcile billing data

    • Forecast future spend for budgeting purposes

    • Provide cost benefit analyses that compare your internal workloads with Amazon Web Services

    Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

    You can use the following endpoints for the Amazon Web Services Price List API:

    • https://api.pricing.us-east-1.amazonaws.com

    • https://api.pricing.ap-south-1.amazonaws.com

    " } diff --git a/services/pricing/src/main/resources/codegen-resources/waiters-2.json b/services/pricing/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/pricing/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/privatenetworks/pom.xml b/services/privatenetworks/pom.xml index e8c0ccb5dbb1..488a1985d85c 100644 --- a/services/privatenetworks/pom.xml +++ b/services/privatenetworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT privatenetworks AWS Java SDK :: Services :: Private Networks diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 794043d43afa..2fb347ae76f4 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 16da9fe5f804..fa5d4d4dd93c 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index d3ed3caef2f8..83a5acb90eff 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 867a7de1c20a..e151e4b5cd6b 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/ram/pom.xml b/services/ram/pom.xml index 
a78439a4d090..c2fb490f8f83 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 8d644d128bd5..6b0bcff1e618 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rds/pom.xml b/services/rds/pom.xml index a3bc8f5e4dc7..76b880a786f0 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 8d8579f4dd47..4f884a148f69 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index a6b9b1148cd1..df6ab7bdf62e 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json index fb0c8cf49517..af8ff95a9d0f 100644 --- a/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/redshift/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -349,44 +349,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": 
"https://redshift.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://redshift.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json b/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json index 5603237a0d85..a065a7553eea 100644 --- a/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/redshift/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-northeast-3" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": 
"ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": true, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - 
"Region": "us-east-1" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -398,9 +398,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -411,9 +411,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -424,9 +424,9 @@ } }, 
"params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -450,9 +450,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -463,9 +463,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -476,9 +476,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -489,9 +489,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -502,9 +502,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -515,9 +515,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -528,9 +528,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -541,9 +541,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-iso-west-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -554,9 +565,20 @@ } }, "params": { - "UseDualStack": false, + 
"Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -567,9 +589,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -580,9 +613,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -593,9 +637,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -607,8 +651,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -618,9 +662,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -630,11 +674,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint 
are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/redshift/src/main/resources/codegen-resources/paginators-1.json b/services/redshift/src/main/resources/codegen-resources/paginators-1.json index f2cb73e2214e..9fe0f863f2f7 100644 --- a/services/redshift/src/main/resources/codegen-resources/paginators-1.json +++ b/services/redshift/src/main/resources/codegen-resources/paginators-1.json @@ -54,6 +54,12 @@ "output_token": "Marker", "result_key": "Clusters" }, + "DescribeCustomDomainAssociations": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Associations" + }, "DescribeDataShares": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/services/redshift/src/main/resources/codegen-resources/service-2.json b/services/redshift/src/main/resources/codegen-resources/service-2.json index 647883d62f27..74695183016f 100644 --- a/services/redshift/src/main/resources/codegen-resources/service-2.json +++ b/services/redshift/src/main/resources/codegen-resources/service-2.json @@ -356,6 +356,24 @@ ], "documentation":"

    Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating an Amazon Redshift subnet group.

    For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.

    " }, + "CreateCustomDomainAssociation":{ + "name":"CreateCustomDomainAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCustomDomainAssociationMessage"}, + "output":{ + "shape":"CreateCustomDomainAssociationResult", + "resultWrapper":"CreateCustomDomainAssociationResult" + }, + "errors":[ + {"shape":"UnsupportedOperationFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CustomCnameAssociationFault"} + ], + "documentation":"

    Used to create a custom domain name for a cluster. Properties include the custom domain name, the cluster the custom domain is associated with, and the certificate Amazon Resource Name (ARN).

    " + }, "CreateEndpointAccess":{ "name":"CreateEndpointAccess", "http":{ @@ -655,6 +673,20 @@ ], "documentation":"

    Deletes the specified cluster subnet group.

    " }, + "DeleteCustomDomainAssociation":{ + "name":"DeleteCustomDomainAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCustomDomainAssociationMessage"}, + "errors":[ + {"shape":"UnsupportedOperationFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CustomCnameAssociationFault"} + ], + "documentation":"

    Contains information about deleting a custom domain association for a cluster.

    " + }, "DeleteEndpointAccess":{ "name":"DeleteEndpointAccess", "http":{ @@ -977,6 +1009,23 @@ ], "documentation":"

    Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned.

    If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.

    " }, + "DescribeCustomDomainAssociations":{ + "name":"DescribeCustomDomainAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCustomDomainAssociationsMessage"}, + "output":{ + "shape":"CustomDomainAssociationsMessage", + "resultWrapper":"DescribeCustomDomainAssociationsResult" + }, + "errors":[ + {"shape":"CustomDomainAssociationNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

    Contains information for custom domain associations for a cluster.

    " + }, "DescribeDataShares":{ "name":"DescribeDataShares", "http":{ @@ -1640,7 +1689,9 @@ {"shape":"InvalidElasticIpFault"}, {"shape":"TableLimitExceededFault"}, {"shape":"InvalidClusterTrackFault"}, - {"shape":"InvalidRetentionPeriodFault"} + {"shape":"InvalidRetentionPeriodFault"}, + {"shape":"UnsupportedOperationFault"}, + {"shape":"CustomCnameAssociationFault"} ], "documentation":"

    Modifies the settings for a cluster.

    You can also change node type and the number of nodes to scale up or down the cluster. When resizing a cluster, you must specify both the number of nodes and the node type even if one of the parameters does not change.

    You can add another security or parameter group, or change the admin user password. Resetting a cluster password or modifying the security groups associated with a cluster do not need a reboot. However, modifying a parameter group requires a reboot for parameters to take effect. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

    " }, @@ -1766,6 +1817,24 @@ ], "documentation":"

    Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.

    " }, + "ModifyCustomDomainAssociation":{ + "name":"ModifyCustomDomainAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyCustomDomainAssociationMessage"}, + "output":{ + "shape":"ModifyCustomDomainAssociationResult", + "resultWrapper":"ModifyCustomDomainAssociationResult" + }, + "errors":[ + {"shape":"UnsupportedOperationFault"}, + {"shape":"ClusterNotFoundFault"}, + {"shape":"CustomCnameAssociationFault"} + ], + "documentation":"

    Contains information for changing a custom domain association.

    " + }, "ModifyEndpointAccess":{ "name":"ModifyEndpointAccess", "http":{ @@ -2348,6 +2417,32 @@ "locationName":"ClusterAssociatedToSchedule" } }, + "Association":{ + "type":"structure", + "members":{ + "CustomDomainCertificateArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) for the certificate associated with the custom domain.

    " + }, + "CustomDomainCertificateExpiryDate":{ + "shape":"TStamp", + "documentation":"

    The expiration date for the certificate.

    " + }, + "CertificateAssociations":{ + "shape":"CertificateAssociationList", + "documentation":"

    A list of all associated clusters and domain names tied to a specific certificate.

    " + } + }, + "documentation":"

    Contains information about the custom domain name association.

    ", + "wrapper":true + }, + "AssociationList":{ + "type":"list", + "member":{ + "shape":"Association", + "locationName":"Association" + } + }, "AttributeList":{ "type":"list", "member":{ @@ -2559,7 +2654,7 @@ }, "SnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    " + "documentation":"

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    " }, "AccountWithRestoreAccess":{ "shape":"String", @@ -2712,6 +2807,27 @@ } } }, + "CertificateAssociation":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"String", + "documentation":"

    The custom domain name for the certificate association.

    " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The cluster identifier for the certificate association.

    " + } + }, + "documentation":"

    A cluster ID and custom domain name tied to a specific certificate. These are typically returned in a list.

    " + }, + "CertificateAssociationList":{ + "type":"list", + "member":{ + "shape":"CertificateAssociation", + "locationName":"CertificateAssociation" + } + }, "Cluster":{ "type":"structure", "members":{ @@ -2922,6 +3038,18 @@ "ReservedNodeExchangeStatus":{ "shape":"ReservedNodeExchangeStatus", "documentation":"

    The status of the reserved-node exchange request. Statuses include in-progress and requested.

    " + }, + "CustomDomainName":{ + "shape":"String", + "documentation":"

    The custom domain name associated with the cluster.

    " + }, + "CustomDomainCertificateArn":{ + "shape":"String", + "documentation":"

    The certificate Amazon Resource Name (ARN) for the custom domain name.

    " + }, + "CustomDomainCertificateExpiryDate":{ + "shape":"TStamp", + "documentation":"

    The expiration date for the certificate associated with the custom domain name.

    " } }, "documentation":"

    Describes a cluster.

    ", @@ -3604,7 +3732,7 @@ }, "SourceSnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

    The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints:

    • Must be the identifier for a valid cluster.

    " + "documentation":"

    The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints:

    • Must be the identifier for a valid cluster.

    " }, "TargetSnapshotIdentifier":{ "shape":"String", @@ -3692,11 +3820,11 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

    The user name associated with the admin user for the cluster that is being created.

    Constraints:

    • Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC.

    • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

    • The first character must be a letter.

    • Must not contain a colon (:) or a slash (/).

    • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

    " + "documentation":"

    The user name associated with the admin user account for the cluster that is being created.

    Constraints:

    • Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC.

    • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

    • The first character must be a letter.

    • Must not contain a colon (:) or a slash (/).

    • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

    " }, "MasterUserPassword":{ "shape":"String", - "documentation":"

    The password associated with the admin user for the cluster that is being created.

    Constraints:

    • Must be between 8 and 64 characters in length.

    • Must contain at least one uppercase letter.

    • Must contain at least one lowercase letter.

    • Must contain one number.

    • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

    " + "documentation":"

    The password associated with the admin user account for the cluster that is being created.

    Constraints:

    • Must be between 8 and 64 characters in length.

    • Must contain at least one uppercase letter.

    • Must contain at least one lowercase letter.

    • Must contain one number.

    • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

    " }, "ClusterSecurityGroups":{ "shape":"ClusterSecurityGroupNameList", @@ -3945,6 +4073,49 @@ "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} } }, + "CreateCustomDomainAssociationMessage":{ + "type":"structure", + "required":[ + "CustomDomainName", + "CustomDomainCertificateArn", + "ClusterIdentifier" + ], + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

    The custom domain name for a custom domain association.

    " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

    The certificate Amazon Resource Name (ARN) for the custom domain name association.

    " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The cluster identifier that the custom domain is associated with.

    " + } + } + }, + "CreateCustomDomainAssociationResult":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

    The custom domain name for the association result.

    " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

    The Amazon Resource Name (ARN) for the certificate associated with the custom domain name.

    " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The identifier of the cluster that the custom domain is associated with.

    " + }, + "CustomDomainCertExpiryTime":{ + "shape":"String", + "documentation":"

    The expiration time for the certificate for the custom domain.

    " + } + } + }, "CreateEndpointAccessMessage":{ "type":"structure", "required":[ @@ -4245,6 +4416,55 @@ } } }, + "CustomCnameAssociationFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An error occurred when an attempt was made to change the custom domain association.

    ", + "error":{ + "code":"CustomCnameAssociationFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CustomDomainAssociationNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An error occurred. The custom domain name couldn't be found.

    ", + "error":{ + "code":"CustomDomainAssociationNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "CustomDomainAssociationsMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

    The marker for the custom domain association.

    " + }, + "Associations":{ + "shape":"AssociationList", + "documentation":"

    The associations for the custom domain.

    " + } + } + }, + "CustomDomainCertificateArnString":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:[\\w+=/,.@-]+:acm:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=,.@-]+)*" + }, + "CustomDomainNameString":{ + "type":"string", + "max":253, + "min":1, + "pattern":"^((?!-)[A-Za-z0-9-]{1,63}(?The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints: Must be the name of a valid cluster.

    " + "documentation":"

    The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    Constraints: Must be the name of a valid cluster.

    " } }, "documentation":"

    " @@ -4552,6 +4772,16 @@ }, "documentation":"

    " }, + "DeleteCustomDomainAssociationMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The identifier of the cluster to delete a custom domain association for.

    " + } + } + }, "DeleteEndpointAccessMessage":{ "type":"structure", "required":["EndpointName"], @@ -4826,7 +5056,7 @@ }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 500.

    " + "documentation":"

    The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

    Default: 100

    Constraints: minimum 20, maximum 100.

    " }, "Marker":{ "shape":"String", @@ -4946,6 +5176,27 @@ }, "documentation":"

    " }, + "DescribeCustomDomainAssociationsMessage":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

    The custom domain name for the custom domain association.

    " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

    The certificate Amazon Resource Name (ARN) for the custom domain association.

    " + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

    The maximum records setting for the associated custom domain.

    " + }, + "Marker":{ + "shape":"String", + "documentation":"

    The marker for the custom domain association.

    " + } + } + }, "DescribeDataSharesForConsumerMessage":{ "type":"structure", "members":{ @@ -6218,10 +6469,7 @@ }, "GetClusterCredentialsMessage":{ "type":"structure", - "required":[ - "DbUser", - "ClusterIdentifier" - ], + "required":["DbUser"], "members":{ "DbUser":{ "shape":"String", @@ -6246,13 +6494,16 @@ "DbGroups":{ "shape":"DbGroupList", "documentation":"

    A list of the names of existing database groups that the user named in DbUser will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC.

    Database group name constraints

    • Must be 1 to 64 alphanumeric characters or hyphens

    • Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen.

    • First character must be a letter.

    • Must not contain a colon ( : ) or slash ( / ).

    • Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.

    " + }, + "CustomDomainName":{ + "shape":"String", + "documentation":"

    The custom domain name for the cluster credentials.

    " } }, "documentation":"

    The request parameters to get cluster credentials.

    " }, "GetClusterCredentialsWithIAMMessage":{ "type":"structure", - "required":["ClusterIdentifier"], "members":{ "DbName":{ "shape":"String", @@ -6265,6 +6516,10 @@ "DurationSeconds":{ "shape":"IntegerOptional", "documentation":"

    The number of seconds until the returned temporary password expires.

    Range: 900-3600. Default: 900.

    " + }, + "CustomDomainName":{ + "shape":"String", + "documentation":"

    The custom domain name for the IAM message cluster credentials.

    " } } }, @@ -7230,7 +7485,7 @@ }, "MasterUserPassword":{ "shape":"String", - "documentation":"

    The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Operations never return the password, so this operation provides a way to regain access to the admin user for a cluster if the password is lost.

    Default: Uses existing setting.

    Constraints:

    • Must be between 8 and 64 characters in length.

    • Must contain at least one uppercase letter.

    • Must contain at least one lowercase letter.

    • Must contain one number.

    • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

    " + "documentation":"

    The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response.

    Operations never return the password, so this operation provides a way to regain access to the admin user account for a cluster if the password is lost.

    Default: Uses existing setting.

    Constraints:

    • Must be between 8 and 64 characters in length.

    • Must contain at least one uppercase letter.

    • Must contain at least one lowercase letter.

    • Must contain one number.

    • Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), \\, /, or @.

    " }, "ClusterParameterGroupName":{ "shape":"String", @@ -7401,6 +7656,45 @@ "ClusterSubnetGroup":{"shape":"ClusterSubnetGroup"} } }, + "ModifyCustomDomainAssociationMessage":{ + "type":"structure", + "required":["ClusterIdentifier"], + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

    The custom domain name for a changed custom domain association.

    " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

    The certificate Amazon Resource Name (ARN) for the changed custom domain association.

    " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The identifier of the cluster to change a custom domain association for.

    " + } + } + }, + "ModifyCustomDomainAssociationResult":{ + "type":"structure", + "members":{ + "CustomDomainName":{ + "shape":"CustomDomainNameString", + "documentation":"

    The custom domain name associated with the result for the changed custom domain association.

    " + }, + "CustomDomainCertificateArn":{ + "shape":"CustomDomainCertificateArnString", + "documentation":"

    The certificate Amazon Resource Name (ARN) associated with the result for the changed custom domain association.

    " + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

    The identifier of the cluster associated with the result for the changed custom domain association.

    " + }, + "CustomDomainCertExpiryTime":{ + "shape":"String", + "documentation":"

    The certificate expiration time associated with the result for the changed custom domain association.

    " + } + } + }, "ModifyEndpointAccessMessage":{ "type":"structure", "required":["EndpointName"], @@ -8575,7 +8869,7 @@ }, "SnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

    The name of the cluster the source snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    " + "documentation":"

    The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    " }, "Port":{ "shape":"IntegerOptional", @@ -8893,7 +9187,7 @@ }, "SnapshotClusterIdentifier":{ "shape":"String", - "documentation":"

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    " + "documentation":"

    The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.

    " }, "AccountWithRestoreAccess":{ "shape":"String", diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 7fe69cd45692..cfaa87cfc6bb 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index dab82c1c89a0..144ce67e6e1d 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index 64a1b59857f0..99db6abfcab1 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/paginators-1.json b/services/rekognition/src/main/resources/codegen-resources/paginators-1.json index 8874e7e7f4f8..6ac67e68e21b 100644 --- a/services/rekognition/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rekognition/src/main/resources/codegen-resources/paginators-1.json @@ -86,6 +86,12 @@ "input_token": "NextToken", "limit_key": "MaxResults", "output_token": "NextToken" + }, + "ListUsers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Users" } } } \ No newline at end of file diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 633410f12449..2aff6686e930 100644 --- 
a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,27 @@ "uid":"rekognition-2016-06-27" }, "operations":{ + "AssociateFaces":{ + "name":"AssociateFaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateFacesRequest"}, + "output":{"shape":"AssociateFacesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that are present in the FaceIds list is associated with the provided UserID. The maximum number of total FaceIds per UserID is 100.

    The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75.

    If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations.

    The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be:

    • ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.

    • CREATED - A UserID has been created, but has no FaceID(s) associated with it.

    • UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.

    " + }, "CompareFaces":{ "name":"CompareFaces", "http":{ @@ -171,6 +192,27 @@ ], "documentation":"

    Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video.

    Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels.

    • If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream for receiving the output. You must use the FaceSearch option in Settings, specifying the collection that contains the faces you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing.

    • If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect by using the ConnectedHome option in settings, and selecting one of the following: PERSON, PET, PACKAGE, ALL You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time.

    Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field.

    This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.

    " }, + "CreateUser":{ + "name":"CreateUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUserRequest"}, + "output":{"shape":"CreateUserResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a new User within a collection specified by CollectionId. Takes UserId as a parameter, which is a user provided ID which should be unique within the collection. The provided UserId will alias the system generated UUID to make the UserId more user friendly.

    Uses a ClientToken, an idempotency token that ensures a call to CreateUser completes only once. If the value is not supplied, the AWS SDK generates an idempotency token for the requests. This prevents retries after a network error results from making multiple CreateUser calls.

    " + }, "DeleteCollection":{ "name":"DeleteCollection", "http":{ @@ -303,6 +345,26 @@ ], "documentation":"

    Deletes the stream processor identified by Name. You assign the value for Name when you create the stream processor with CreateStreamProcessor. You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor.

    " }, + "DeleteUser":{ + "name":"DeleteUser", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUserRequest"}, + "output":{"shape":"DeleteUserResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Deletes the specified UserID within the collection. Faces that are associated with the UserID are disassociated from the UserID before deleting the specified UserID. If the specified Collection or UserID is already deleted or not found, a ResourceNotFoundException will be thrown. If the action is successful with a 200 response, an empty HTTP body is returned.

    " + }, "DescribeCollection":{ "name":"DescribeCollection", "http":{ @@ -518,6 +580,26 @@ ], "documentation":"

    Detects text in the input image and converts it into machine-readable text.

    Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file.

    The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image.

    A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image.

    A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines.

    To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field.

    To be detected, text must be within +/- 90 degrees orientation of the horizontal axis.

    For more information, see Detecting text in the Amazon Rekognition Developer Guide.

    " }, + "DisassociateFaces":{ + "name":"DisassociateFaces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateFacesRequest"}, + "output":{"shape":"DisassociateFacesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Removes the association between a Face supplied in an array of FaceIds and the User. If the User is not present already, then a ResourceNotFound exception is thrown. If successful, an array of faces that are disassociated from the User is returned. If a given face is already disassociated from the given UserID, it will be ignored and not be returned in the response. If a given face is already associated with a different User or not found in the collection it will be returned as part of UnsuccessfulDisassociations. You can remove 1 - 100 face IDs from a user at one time.

    " + }, "DistributeDatasetEntries":{ "name":"DistributeDatasetEntries", "http":{ @@ -882,6 +964,25 @@ ], "documentation":"

    Returns a list of tags in an Amazon Rekognition collection, stream processor, or Custom Labels model.

    This operation requires permissions to perform the rekognition:ListTagsForResource action.

    " }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns metadata of the User such as UserID in the specified collection. Anonymous User (to reserve faces without any identity) is not returned as part of this request. The results are sorted by system generated primary key ID. If the response is truncated, NextToken is returned in the response that can be used in the subsequent request to retrieve the next set of identities.

    " + }, "PutProjectPolicy":{ "name":"PutProjectPolicy", "http":{ @@ -965,6 +1066,45 @@ ], "documentation":"

    For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

    To search for all faces in an input image, you might first call the IndexFaces operation, and then use the face IDs returned in subsequent calls to the SearchFaces operation.

    You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation.

    You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

    The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

    If no faces are detected in the input image, SearchFacesByImage returns an InvalidParameterException error.

    For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

    The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar for filtering by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE.

    To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

    This operation requires permissions to perform the rekognition:SearchFacesByImage action.

    " }, + "SearchUsers":{ + "name":"SearchUsers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchUsersRequest"}, + "output":{"shape":"SearchUsersResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Searches for UserIDs within a collection based on a FaceId or UserId. This API can be used to find the closest UserID (with a highest similarity) to associate a face. The request must be provided with either FaceId or UserId. The operation returns an array of UserIDs that match the FaceId or UserId, ordered by similarity score with the highest similarity first.

    " + }, + "SearchUsersByImage":{ + "name":"SearchUsersByImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchUsersByImageRequest"}, + "output":{"shape":"SearchUsersByImageResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidImageFormatException"}, + {"shape":"InvalidS3ObjectException"}, + {"shape":"ImageTooLargeException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Searches for UserIDs using a supplied image. It first detects the largest face in the image, and then searches a specified collection for matching UserIDs.

    The operation returns an array of UserIDs that match the face in the supplied image, ordered by similarity score with the highest similarity first. It also returns a bounding box for the face found in the input image.

    Information about faces detected in the supplied image, but not used for the search, is returned in an array of UnsearchedFace objects. If no valid face is detected in the image, the response will contain an empty UserMatches list and no SearchedFace object.

    " + }, "StartCelebrityRecognition":{ "name":"StartCelebrityRecognition", "http":{ @@ -1329,6 +1469,70 @@ "type":"list", "member":{"shape":"Asset"} }, + "AssociateFacesRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId", + "FaceIds" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection containing the UserID.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    The ID for the existing UserID.

    " + }, + "FaceIds":{ + "shape":"UserFaceIdList", + "documentation":"

    An array of FaceIDs to associate with the UserID.

    " + }, + "UserMatchThreshold":{ + "shape":"Percent", + "documentation":"

    An optional value specifying the minimum confidence in the UserID match to return. The default value is 75.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Idempotent token used to identify the request to AssociateFaces. If you use the same token with multiple AssociateFaces requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

    ", + "idempotencyToken":true + } + } + }, + "AssociateFacesResponse":{ + "type":"structure", + "members":{ + "AssociatedFaces":{ + "shape":"AssociatedFacesList", + "documentation":"

    An array of AssociatedFace objects containing FaceIDs that are successfully associated with the UserID is returned. Returned if the AssociateFaces action is successful.

    " + }, + "UnsuccessfulFaceAssociations":{ + "shape":"UnsuccessfulFaceAssociationList", + "documentation":"

    An array of UnsuccessfulAssociation objects containing FaceIDs that are not successfully associated along with the reasons. Returned if the AssociateFaces action is successful.

    " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

    The status of an update made to a UserID. Reflects if the UserID has been updated for every requested change.

    " + } + } + }, + "AssociatedFace":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

    Unique identifier assigned to the face.

    " + } + }, + "documentation":"

    Provides face metadata for the faces that are associated to a specific UserID.

    " + }, + "AssociatedFacesList":{ + "type":"list", + "member":{"shape":"AssociatedFace"}, + "max":100, + "min":0 + }, "Attribute":{ "type":"string", "enum":[ @@ -1703,6 +1907,13 @@ }, "documentation":"

    Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.

    " }, + "ConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    A User with the same Id already exists within the collection, or the update or deletion of the User caused an inconsistent state.

    ", + "exception":true + }, "ConnectedHomeLabel":{"type":"string"}, "ConnectedHomeLabels":{ "type":"list", @@ -2084,6 +2295,33 @@ } } }, + "CreateUserRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection to which the new UserID needs to be created.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    ID for the UserID to be created. This ID needs to be unique within the collection.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Idempotent token used to identify the request to CreateUser. If you use the same token with multiple CreateUser requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

    ", + "idempotencyToken":true + } + } + }, + "CreateUserResponse":{ + "type":"structure", + "members":{ + } + }, "CustomLabel":{ "type":"structure", "members":{ @@ -2364,6 +2602,10 @@ "DeletedFaces":{ "shape":"FaceIdList", "documentation":"

    An array of strings (face IDs) of the faces that were deleted.

    " + }, + "UnsuccessfulFaceDeletions":{ + "shape":"UnsuccessfulFaceDeletionsList", + "documentation":"

    An array of any faces that weren't deleted.

    " } } }, @@ -2446,6 +2688,33 @@ "members":{ } }, + "DeleteUserRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection from which the UserID needs to be deleted.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    ID for the UserID to be deleted.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Idempotent token used to identify the request to DeleteUser. If you use the same token with multiple DeleteUser requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

    ", + "idempotencyToken":true + } + } + }, + "DeleteUserResponse":{ + "type":"structure", + "members":{ + } + }, "DescribeCollectionRequest":{ "type":"structure", "required":["CollectionId"], @@ -2474,6 +2743,10 @@ "CreationTimestamp":{ "shape":"DateTime", "documentation":"

    The number of milliseconds since the Unix epoch time until the creation of the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970.

    " + }, + "UserCount":{ + "shape":"ULong", + "documentation":"

    The number of UserIDs assigned to the specified collection.

    " } } }, @@ -2966,6 +3239,66 @@ }, "documentation":"

    A set of parameters that allow you to filter out certain results from your returned results.

    " }, + "DisassociateFacesRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "UserId", + "FaceIds" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection containing the UserID.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    ID for the existing UserID.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

    Idempotent token used to identify the request to DisassociateFaces. If you use the same token with multiple DisassociateFaces requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.

    ", + "idempotencyToken":true + }, + "FaceIds":{ + "shape":"UserFaceIdList", + "documentation":"

    An array of face IDs to disassociate from the UserID.

    " + } + } + }, + "DisassociateFacesResponse":{ + "type":"structure", + "members":{ + "DisassociatedFaces":{ + "shape":"DisassociatedFacesList", + "documentation":"

    An array of DisassociatedFace objects containing FaceIds that are successfully disassociated from the UserID is returned. Returned if the DisassociateFaces action is successful.

    " + }, + "UnsuccessfulFaceDisassociations":{ + "shape":"UnsuccessfulFaceDisassociationList", + "documentation":"

    An array of UnsuccessfulDisassociation objects containing FaceIds that are not successfully disassociated, along with the reasons for the failure to disassociate. Returned if the DisassociateFaces action is successful.

    " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

    The status of an update made to a User. Reflects if the User has been updated for every requested change.

    " + } + } + }, + "DisassociatedFace":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

    Unique identifier assigned to the face.

    " + } + }, + "documentation":"

    Provides face metadata for the faces that are disassociated from a specific UserID.

    " + }, + "DisassociatedFacesList":{ + "type":"list", + "member":{"shape":"DisassociatedFace"}, + "max":100, + "min":0 + }, "DistributeDataset":{ "type":"structure", "required":["Arn"], @@ -3190,6 +3523,10 @@ "IndexFacesModelVersion":{ "shape":"IndexFacesModelVersion", "documentation":"

    The version of the face detect and storage model that was used when indexing the face vector.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    Unique identifier assigned to the user.

    " } }, "documentation":"

    Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.

    " @@ -4711,6 +5048,14 @@ "MaxResults":{ "shape":"PageSize", "documentation":"

    Maximum number of faces to return.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    An array of user IDs to match when listing faces in a collection.

    " + }, + "FaceIds":{ + "shape":"FaceIdList", + "documentation":"

    An array of face IDs to match when listing faces in a collection.

    " } } }, @@ -4812,6 +5157,37 @@ } } }, + "ListUsersRequest":{ + "type":"structure", + "required":["CollectionId"], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection.

    " + }, + "MaxResults":{ + "shape":"MaxUserResults", + "documentation":"

    Maximum number of UsersID to return.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    Pagination token to receive the next set of UsersID.

    " + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "Users":{ + "shape":"UserList", + "documentation":"

    List of UsersID associated with the specified collection.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    A pagination token to be used with the subsequent request if the response is truncated.

    " + } + } + }, "LivenessImageBlob":{ "type":"blob", "max":204800, @@ -4861,6 +5237,20 @@ "documentation":"

    The format of the project policy document that you supplied to PutProjectPolicy is incorrect.

    ", "exception":true }, + "MatchedUser":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

    A provided ID for the UserID. Unique within the collection.

    " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

    The status of the user matched to a provided FaceID.

    " + } + }, + "documentation":"

    Contains metadata for a UserID matched with a given face.

    " + }, "MaxDurationInSecondsULong":{ "type":"long", "max":120, @@ -4884,6 +5274,11 @@ "type":"integer", "min":1 }, + "MaxUserResults":{ + "type":"integer", + "max":500, + "min":1 + }, "MinCoveragePercentage":{ "type":"float", "max":100, @@ -5722,6 +6117,127 @@ } } }, + "SearchUsersByImageRequest":{ + "type":"structure", + "required":[ + "CollectionId", + "Image" + ], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection containing the UserID.

    " + }, + "Image":{"shape":"Image"}, + "UserMatchThreshold":{ + "shape":"Percent", + "documentation":"

    Specifies the minimum confidence in the UserID match to return. Default value is 80.

    " + }, + "MaxUsers":{ + "shape":"MaxUserResults", + "documentation":"

    Maximum number of UserIDs to return.

    " + }, + "QualityFilter":{ + "shape":"QualityFilter", + "documentation":"

    A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't searched for in the collection. The default value is NONE.

    " + } + } + }, + "SearchUsersByImageResponse":{ + "type":"structure", + "members":{ + "UserMatches":{ + "shape":"UserMatchList", + "documentation":"

    An array of UserID objects that matched the input face, along with the confidence in the match. The returned structure will be empty if there are no matches. Returned if the SearchUsersByImageResponse action is successful.

    " + }, + "FaceModelVersion":{ + "shape":"String", + "documentation":"

    Version number of the face detection model associated with the input collection CollectionId.

    " + }, + "SearchedFace":{ + "shape":"SearchedFaceDetails", + "documentation":"

    A list of FaceDetail objects containing the BoundingBox for the largest face in image, as well as the confidence in the bounding box, that was searched for matches. If no valid face is detected in the image the response will contain no SearchedFace object.

    " + }, + "UnsearchedFaces":{ + "shape":"UnsearchedFacesList", + "documentation":"

    List of UnsearchedFace objects. Contains the face details inferred from the specified image but not used for search. Contains reasons that describe why a face wasn't used for Search.

    " + } + } + }, + "SearchUsersRequest":{ + "type":"structure", + "required":["CollectionId"], + "members":{ + "CollectionId":{ + "shape":"CollectionId", + "documentation":"

    The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a FaceId is provided, UserId isn’t required to be present in the Collection.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    ID for the existing User.

    " + }, + "FaceId":{ + "shape":"FaceId", + "documentation":"

    ID for the existing face.

    " + }, + "UserMatchThreshold":{ + "shape":"Percent", + "documentation":"

    Optional value that specifies the minimum confidence in the matched UserID to return. Default value of 80.

    " + }, + "MaxUsers":{ + "shape":"MaxUserResults", + "documentation":"

    Maximum number of identities to return.

    " + } + } + }, + "SearchUsersResponse":{ + "type":"structure", + "members":{ + "UserMatches":{ + "shape":"UserMatchList", + "documentation":"

    An array of UserMatch objects that matched the input face along with the confidence in the match. Array will be empty if there are no matches.

    " + }, + "FaceModelVersion":{ + "shape":"String", + "documentation":"

    Version number of the face detection model associated with the input CollectionId.

    " + }, + "SearchedFace":{ + "shape":"SearchedFace", + "documentation":"

    Contains the ID of a face that was used to search for matches in a collection.

    " + }, + "SearchedUser":{ + "shape":"SearchedUser", + "documentation":"

    Contains the ID of the UserID that was used to search for matches in a collection.

    " + } + } + }, + "SearchedFace":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

    Unique identifier assigned to the face.

    " + } + }, + "documentation":"

    Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for search.

    " + }, + "SearchedFaceDetails":{ + "type":"structure", + "members":{ + "FaceDetail":{"shape":"FaceDetail"} + }, + "documentation":"

    Contains data regarding the input face used for a search.

    " + }, + "SearchedUser":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

    A provided ID for the UserID. Unique within the collection.

    " + } + }, + "documentation":"

    Contains metadata about a User searched for within a collection.

    " + }, "SegmentConfidence":{ "type":"float", "max":100, @@ -6674,6 +7190,148 @@ "type":"list", "member":{"shape":"UnindexedFace"} }, + "UnsearchedFace":{ + "type":"structure", + "members":{ + "FaceDetails":{"shape":"FaceDetail"}, + "Reasons":{ + "shape":"UnsearchedFaceReasons", + "documentation":"

    Reasons why a face wasn't used for Search.

    " + } + }, + "documentation":"

    Face details inferred from the image but not used for search. The response attribute contains reasons for why a face wasn't used for Search.

    " + }, + "UnsearchedFaceReason":{ + "type":"string", + "enum":[ + "FACE_NOT_LARGEST", + "EXCEEDS_MAX_FACES", + "EXTREME_POSE", + "LOW_BRIGHTNESS", + "LOW_SHARPNESS", + "LOW_CONFIDENCE", + "SMALL_BOUNDING_BOX", + "LOW_FACE_QUALITY" + ] + }, + "UnsearchedFaceReasons":{ + "type":"list", + "member":{"shape":"UnsearchedFaceReason"} + }, + "UnsearchedFacesList":{ + "type":"list", + "member":{"shape":"UnsearchedFace"} + }, + "UnsuccessfulFaceAssociation":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

    A unique identifier assigned to the face.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    A provided ID for the UserID. Unique within the collection.

    " + }, + "Confidence":{ + "shape":"Percent", + "documentation":"

    Match confidence with the UserID, provides information regarding if a face association was unsuccessful because it didn't meet UserMatchThreshold.

    " + }, + "Reasons":{ + "shape":"UnsuccessfulFaceAssociationReasons", + "documentation":"

    The reason why the association was unsuccessful.

    " + } + }, + "documentation":"

    Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully associated.

    " + }, + "UnsuccessfulFaceAssociationList":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceAssociation"}, + "max":500, + "min":0 + }, + "UnsuccessfulFaceAssociationReason":{ + "type":"string", + "enum":[ + "FACE_NOT_FOUND", + "ASSOCIATED_TO_A_DIFFERENT_USER", + "LOW_MATCH_CONFIDENCE" + ] + }, + "UnsuccessfulFaceAssociationReasons":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceAssociationReason"} + }, + "UnsuccessfulFaceDeletion":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

    A unique identifier assigned to the face.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    A provided ID for the UserID. Unique within the collection.

    " + }, + "Reasons":{ + "shape":"UnsuccessfulFaceDeletionReasons", + "documentation":"

    The reason why the deletion was unsuccessful.

    " + } + }, + "documentation":"

    Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully deleted.

    " + }, + "UnsuccessfulFaceDeletionReason":{ + "type":"string", + "enum":[ + "ASSOCIATED_TO_AN_EXISTING_USER", + "FACE_NOT_FOUND" + ] + }, + "UnsuccessfulFaceDeletionReasons":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDeletionReason"} + }, + "UnsuccessfulFaceDeletionsList":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDeletion"}, + "max":4096, + "min":0 + }, + "UnsuccessfulFaceDisassociation":{ + "type":"structure", + "members":{ + "FaceId":{ + "shape":"FaceId", + "documentation":"

    A unique identifier assigned to the face.

    " + }, + "UserId":{ + "shape":"UserId", + "documentation":"

    A provided ID for the UserID. Unique within the collection.

    " + }, + "Reasons":{ + "shape":"UnsuccessfulFaceDisassociationReasons", + "documentation":"

    The reason why the disassociation was unsuccessful.

    " + } + }, + "documentation":"

    Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully disassociated.

    " + }, + "UnsuccessfulFaceDisassociationList":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDisassociation"}, + "max":500, + "min":0 + }, + "UnsuccessfulFaceDisassociationReason":{ + "type":"string", + "enum":[ + "FACE_NOT_FOUND", + "ASSOCIATED_TO_A_DIFFERENT_USER" + ] + }, + "UnsuccessfulFaceDisassociationReasons":{ + "type":"list", + "member":{"shape":"UnsuccessfulFaceDisassociationReason"} + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -6756,6 +7414,65 @@ "max":255, "min":0 }, + "User":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

    A provided ID for the User. Unique within the collection.

    " + }, + "UserStatus":{ + "shape":"UserStatus", + "documentation":"

    Communicates if the UserID has been updated with latest set of faces to be associated with the UserID.

    " + } + }, + "documentation":"

    Metadata of the user stored in a collection.

    " + }, + "UserFaceIdList":{ + "type":"list", + "member":{"shape":"FaceId"}, + "max":100, + "min":1 + }, + "UserId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9_.\\-:]+" + }, + "UserList":{ + "type":"list", + "member":{"shape":"User"}, + "max":500 + }, + "UserMatch":{ + "type":"structure", + "members":{ + "Similarity":{ + "shape":"Percent", + "documentation":"

    Confidence in the match of this UserID with the input face.

    " + }, + "User":{ + "shape":"MatchedUser", + "documentation":"

    Describes the UserID metadata.

    " + } + }, + "documentation":"

    Provides UserID metadata along with the confidence in the match of this UserID with the input face.

    " + }, + "UserMatchList":{ + "type":"list", + "member":{"shape":"UserMatch"}, + "max":500 + }, + "UserStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "UPDATING", + "CREATING", + "CREATED" + ] + }, "ValidationData":{ "type":"structure", "members":{ @@ -6849,5 +7566,5 @@ "exception":true } }, - "documentation":"

    This is the API Reference for Amazon Rekognition Image, Amazon Rekognition Custom Labels, Amazon Rekognition Stored Video, Amazon Rekognition Streaming Video. It provides descriptions of actions, data types, common parameters, and common errors.

    Amazon Rekognition Image

    Amazon Rekognition Custom Labels

    Amazon Rekognition Video Stored Video

    Amazon Rekognition Video Streaming Video

    " + "documentation":"

    This is the API Reference for Amazon Rekognition Image, Amazon Rekognition Custom Labels, Amazon Rekognition Stored Video, Amazon Rekognition Streaming Video. It provides descriptions of actions, data types, common parameters, and common errors.

    Amazon Rekognition Image

    Amazon Rekognition Custom Labels

    Amazon Rekognition Video Stored Video

    Amazon Rekognition Video Streaming Video

    " } diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index c2e02e850d35..013554eba7f6 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index 9ce6a2e8d66a..0d7500cb9276 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index b4f4480f6059..2f8865fd8644 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index c1a3a4956626..c7c23d0e92e9 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index e6ea98691224..3b1ffc97e71c 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index 5cf0fd2679fb..4edb8207b3d4 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 
2.20.90-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index c3ba19dbbd00..09f87585869f 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 601e705ecbed..840ee8c1c373 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json index 4f994c0ade7f..3ccf51cbf5b4 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { 
+ "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git 
a/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json index b2039e364642..f837786f8dd6 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,18 @@ { "testCases": [ + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -8,8 +21,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -21,8 +34,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -34,34 +47,234 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://route53domains.us-east-1.amazonaws.com" + "url": "https://route53domains-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + 
"url": "https://route53domains.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://route53domains-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + 
"UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53domains.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": 
"us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -72,8 +285,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -84,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/route53domains/src/main/resources/codegen-resources/service-2.json b/services/route53domains/src/main/resources/codegen-resources/service-2.json index 25a4e2002824..8bb0ddb21abe 100644 --- a/services/route53domains/src/main/resources/codegen-resources/service-2.json +++ b/services/route53domains/src/main/resources/codegen-resources/service-2.json @@ -341,7 +341,7 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

    This operation registers a domain. Domains are registered either by Amazon Registrar (for .com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). For some top-level domains (TLDs), this operation requires extra parameters.

    When you register a domain, Amazon Route 53 does the following:

    • Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.

    • Enables auto renew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.

    • Optionally enables privacy protection, so WHOIS queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection, WHOIS queries return the information that you entered for the administrative, registrant, and technical contacts.

      You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.

    • Charges your Amazon Web Services account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.

    " + "documentation":"

    This operation registers a domain. For some top-level domains (TLDs), this operation requires extra parameters.

    When you register a domain, Amazon Route 53 does the following:

    • Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.

    • Enables auto renew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.

    • Optionally enables privacy protection, so WHOIS queries return contact for the registrar or the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\" If you don't enable privacy protection, WHOIS queries return the information that you entered for the administrative, registrant, and technical contacts.

      While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

    • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.

    • Charges your Amazon Web Services account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.

    " }, "RejectDomainTransferFromAnotherAwsAccount":{ "name":"RejectDomainTransferFromAnotherAwsAccount", @@ -432,7 +432,7 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

    Transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org domains) or with our registrar associate, Gandi (for all other TLDs).

    For more information about transferring domains, see the following topics:

    If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

    If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

    If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

    " + "documentation":"

    Transfers a domain from another registrar to Amazon Route 53.

    For more information about transferring domains, see the following topics:

    If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

    If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

    If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

    " }, "TransferDomainToAnotherAwsAccount":{ "name":"TransferDomainToAnotherAwsAccount", @@ -482,7 +482,7 @@ {"shape":"OperationLimitExceeded"}, {"shape":"UnsupportedTLD"} ], - "documentation":"

    This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, contact information such as email address is replaced either with contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact information for our registrar associate, Gandi.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    This operation affects only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

    By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

    " + "documentation":"

    This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, your contact information is replaced with contact information for the registrar or with the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\"

    While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

    This operation affects only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

    By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

    " }, "UpdateDomainNameservers":{ "name":"UpdateDomainNameservers", @@ -1429,7 +1429,7 @@ "members":{ "Name":{ "shape":"ExtraParamName", - "documentation":"

    The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

    .com.au and .net.au
    • AU_ID_NUMBER

    • AU_ID_TYPE

      Valid values include the following:

      • ABN (Australian business number)

      • ACN (Australian company number)

      • TM (Trademark number)

    .ca
    • BRAND_NUMBER

    • CA_BUSINESS_ENTITY_TYPE

      Valid values include the following:

      • BANK (Bank)

      • COMMERCIAL_COMPANY (Commercial company)

      • COMPANY (Company)

      • COOPERATION (Cooperation)

      • COOPERATIVE (Cooperative)

      • COOPRIX (Cooprix)

      • CORP (Corporation)

      • CREDIT_UNION (Credit union)

      • FOMIA (Federation of mutual insurance associations)

      • INC (Incorporated)

      • LTD (Limited)

      • LTEE (Limitée)

      • LLC (Limited liability corporation)

      • LLP (Limited liability partnership)

      • LTE (Lte.)

      • MBA (Mutual benefit association)

      • MIC (Mutual insurance company)

      • NFP (Not-for-profit corporation)

      • SA (S.A.)

      • SAVINGS_COMPANY (Savings company)

      • SAVINGS_UNION (Savings union)

      • SARL (Société à responsabilité limitée)

      • TRUST (Trust)

      • ULC (Unlimited liability corporation)

    • CA_LEGAL_TYPE

      When ContactType is PERSON, valid values include the following:

      • ABO (Aboriginal Peoples indigenous to Canada)

      • CCT (Canadian citizen)

      • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

      • RES (Permanent resident of Canada)

      When ContactType is a value other than PERSON, valid values include the following:

      • ASS (Canadian unincorporated association)

      • CCO (Canadian corporation)

      • EDU (Canadian educational institution)

      • GOV (Government or government entity in Canada)

      • HOP (Canadian Hospital)

      • INB (Indian Band recognized by the Indian Act of Canada)

      • LAM (Canadian Library, Archive, or Museum)

      • MAJ (Her/His Majesty the Queen/King)

      • OMK (Official mark registered in Canada)

      • PLT (Canadian Political Party)

      • PRT (Partnership Registered in Canada)

      • TDM (Trademark registered in Canada)

      • TRD (Canadian Trade Union)

      • TRS (Trust established in Canada)

    .es
    • ES_IDENTIFICATION

      The value of ES_IDENTIFICATION depends on the following values:

      • The value of ES_LEGAL_FORM

      • The value of ES_IDENTIFICATION_TYPE

      If ES_LEGAL_FORM is any value other than INDIVIDUAL:

      • Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])

      • Example: B12345678

      If ES_LEGAL_FORM is INDIVIDUAL, the value that you specify for ES_IDENTIFICATION depends on the value of ES_IDENTIFICATION_TYPE:

      • If ES_IDENTIFICATION_TYPE is DNI_AND_NIF (for Spanish contacts):

        • Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])

        • Example: 12345678M

      • If ES_IDENTIFICATION_TYPE is NIE (for foreigners with legal residence):

        • Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])

        • Example: Y1234567X

      • If ES_IDENTIFICATION_TYPE is OTHER (for contacts outside of Spain):

        • Specify a passport number, drivers license number, or national identity card number

    • ES_IDENTIFICATION_TYPE

      Valid values include the following:

      • DNI_AND_NIF (For Spanish contacts)

      • NIE (For foreigners with legal residence)

      • OTHER (For contacts outside of Spain)

    • ES_LEGAL_FORM

      Valid values include the following:

      • ASSOCIATION

      • CENTRAL_GOVERNMENT_BODY

      • CIVIL_SOCIETY

      • COMMUNITY_OF_OWNERS

      • COMMUNITY_PROPERTY

      • CONSULATE

      • COOPERATIVE

      • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

      • ECONOMIC_INTEREST_GROUP

      • EMBASSY

      • ENTITY_MANAGING_NATURAL_AREAS

      • FARM_PARTNERSHIP

      • FOUNDATION

      • GENERAL_AND_LIMITED_PARTNERSHIP

      • GENERAL_PARTNERSHIP

      • INDIVIDUAL

      • LIMITED_COMPANY

      • LOCAL_AUTHORITY

      • LOCAL_PUBLIC_ENTITY

      • MUTUAL_INSURANCE_COMPANY

      • NATIONAL_PUBLIC_ENTITY

      • ORDER_OR_RELIGIOUS_INSTITUTION

      • OTHERS (Only for contacts outside of Spain)

      • POLITICAL_PARTY

      • PROFESSIONAL_ASSOCIATION

      • PUBLIC_LAW_ASSOCIATION

      • PUBLIC_LIMITED_COMPANY

      • REGIONAL_GOVERNMENT_BODY

      • REGIONAL_PUBLIC_ENTITY

      • SAVINGS_BANK

      • SPANISH_OFFICE

      • SPORTS_ASSOCIATION

      • SPORTS_FEDERATION

      • SPORTS_LIMITED_COMPANY

      • TEMPORARY_ALLIANCE_OF_ENTERPRISES

      • TRADE_UNION

      • WORKER_OWNED_COMPANY

      • WORKER_OWNED_LIMITED_COMPANY

    .eu
    • EU_COUNTRY_OF_CITIZENSHIP

    .fi
    • BIRTH_DATE_IN_YYYY_MM_DD

    • FI_BUSINESS_NUMBER

    • FI_ID_NUMBER

    • FI_NATIONALITY

      Valid values include the following:

      • FINNISH

      • NOT_FINNISH

    • FI_ORGANIZATION_TYPE

      Valid values include the following:

      • COMPANY

      • CORPORATION

      • GOVERNMENT

      • INSTITUTION

      • POLITICAL_PARTY

      • PUBLIC_COMMUNITY

      • TOWNSHIP

    .fr
    • BIRTH_CITY

    • BIRTH_COUNTRY

    • BIRTH_DATE_IN_YYYY_MM_DD

    • BIRTH_DEPARTMENT: Specify the INSEE code that corresponds with the department where the contact was born. If the contact was born somewhere other than France or its overseas departments, specify 99. For more information, including a list of departments and the corresponding INSEE numbers, see the Wikipedia entry Departments of France.

    • BRAND_NUMBER

    .it
    • IT_NATIONALITY

    • IT_PIN

    • IT_REGISTRANT_ENTITY_TYPE

      Valid values include the following:

      • FOREIGNERS

      • FREELANCE_WORKERS (Freelance workers and professionals)

      • ITALIAN_COMPANIES (Italian companies and one-person companies)

      • NON_PROFIT_ORGANIZATIONS

      • OTHER_SUBJECTS

      • PUBLIC_ORGANIZATIONS

    .ru
    • BIRTH_DATE_IN_YYYY_MM_DD

    • RU_PASSPORT_DATA

    .se
    • BIRTH_COUNTRY

    • SE_ID_NUMBER

    .sg
    • SG_ID_NUMBER

    .uk, .co.uk, .me.uk, and .org.uk
    • UK_CONTACT_TYPE

      Valid values include the following:

      • CRC (UK Corporation by Royal Charter)

      • FCORP (Non-UK Corporation)

      • FIND (Non-UK Individual, representing self)

      • FOTHER (Non-UK Entity that does not fit into any other category)

      • GOV (UK Government Body)

      • IND (UK Individual (representing self))

      • IP (UK Industrial/Provident Registered Company)

      • LLP (UK Limited Liability Partnership)

      • LTD (UK Limited Company)

      • OTHER (UK Entity that does not fit into any other category)

      • PLC (UK Public Limited Company)

      • PTNR (UK Partnership)

      • RCHAR (UK Registered Charity)

      • SCH (UK School)

      • STAT (UK Statutory Body)

      • STRA (UK Sole Trader)

    • UK_COMPANY_NUMBER

    In addition, many TLDs require a VAT_NUMBER.

    " + "documentation":"

    The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

    .com.au and .net.au
    • AU_ID_NUMBER

    • AU_ID_TYPE

      Valid values include the following:

      • ABN (Australian business number)

      • ACN (Australian company number)

      • TM (Trademark number)

    .ca
    • BRAND_NUMBER

    • CA_BUSINESS_ENTITY_TYPE

      Valid values include the following:

      • BANK (Bank)

      • COMMERCIAL_COMPANY (Commercial company)

      • COMPANY (Company)

      • COOPERATION (Cooperation)

      • COOPERATIVE (Cooperative)

      • COOPRIX (Cooprix)

      • CORP (Corporation)

      • CREDIT_UNION (Credit union)

      • FOMIA (Federation of mutual insurance associations)

      • INC (Incorporated)

      • LTD (Limited)

      • LTEE (Limitée)

      • LLC (Limited liability corporation)

      • LLP (Limited liability partnership)

      • LTE (Lte.)

      • MBA (Mutual benefit association)

      • MIC (Mutual insurance company)

      • NFP (Not-for-profit corporation)

      • SA (S.A.)

      • SAVINGS_COMPANY (Savings company)

      • SAVINGS_UNION (Savings union)

      • SARL (Société à responsabilité limitée)

      • TRUST (Trust)

      • ULC (Unlimited liability corporation)

    • CA_LEGAL_TYPE

      When ContactType is PERSON, valid values include the following:

      • ABO (Aboriginal Peoples indigenous to Canada)

      • CCT (Canadian citizen)

      • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

      • RES (Permanent resident of Canada)

      When ContactType is a value other than PERSON, valid values include the following:

      • ASS (Canadian unincorporated association)

      • CCO (Canadian corporation)

      • EDU (Canadian educational institution)

      • GOV (Government or government entity in Canada)

      • HOP (Canadian Hospital)

      • INB (Indian Band recognized by the Indian Act of Canada)

      • LAM (Canadian Library, Archive, or Museum)

      • MAJ (Her/His Majesty the Queen/King)

      • OMK (Official mark registered in Canada)

      • PLT (Canadian Political Party)

      • PRT (Partnership Registered in Canada)

      • TDM (Trademark registered in Canada)

      • TRD (Canadian Trade Union)

      • TRS (Trust established in Canada)

    .es
    • ES_IDENTIFICATION

      The value of ES_IDENTIFICATION depends on the following values:

      • The value of ES_LEGAL_FORM

      • The value of ES_IDENTIFICATION_TYPE

      If ES_LEGAL_FORM is any value other than INDIVIDUAL:

      • Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])

      • Example: B12345678

      If ES_LEGAL_FORM is INDIVIDUAL, the value that you specify for ES_IDENTIFICATION depends on the value of ES_IDENTIFICATION_TYPE:

      • If ES_IDENTIFICATION_TYPE is DNI_AND_NIF (for Spanish contacts):

        • Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])

        • Example: 12345678M

      • If ES_IDENTIFICATION_TYPE is NIE (for foreigners with legal residence):

        • Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])

        • Example: Y1234567X

      • If ES_IDENTIFICATION_TYPE is OTHER (for contacts outside of Spain):

        • Specify a passport number, drivers license number, or national identity card number

    • ES_IDENTIFICATION_TYPE

      Valid values include the following:

      • DNI_AND_NIF (For Spanish contacts)

      • NIE (For foreigners with legal residence)

      • OTHER (For contacts outside of Spain)

    • ES_LEGAL_FORM

      Valid values include the following:

      • ASSOCIATION

      • CENTRAL_GOVERNMENT_BODY

      • CIVIL_SOCIETY

      • COMMUNITY_OF_OWNERS

      • COMMUNITY_PROPERTY

      • CONSULATE

      • COOPERATIVE

      • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

      • ECONOMIC_INTEREST_GROUP

      • EMBASSY

      • ENTITY_MANAGING_NATURAL_AREAS

      • FARM_PARTNERSHIP

      • FOUNDATION

      • GENERAL_AND_LIMITED_PARTNERSHIP

      • GENERAL_PARTNERSHIP

      • INDIVIDUAL

      • LIMITED_COMPANY

      • LOCAL_AUTHORITY

      • LOCAL_PUBLIC_ENTITY

      • MUTUAL_INSURANCE_COMPANY

      • NATIONAL_PUBLIC_ENTITY

      • ORDER_OR_RELIGIOUS_INSTITUTION

      • OTHERS (Only for contacts outside of Spain)

      • POLITICAL_PARTY

      • PROFESSIONAL_ASSOCIATION

      • PUBLIC_LAW_ASSOCIATION

      • PUBLIC_LIMITED_COMPANY

      • REGIONAL_GOVERNMENT_BODY

      • REGIONAL_PUBLIC_ENTITY

      • SAVINGS_BANK

      • SPANISH_OFFICE

      • SPORTS_ASSOCIATION

      • SPORTS_FEDERATION

      • SPORTS_LIMITED_COMPANY

      • TEMPORARY_ALLIANCE_OF_ENTERPRISES

      • TRADE_UNION

      • WORKER_OWNED_COMPANY

      • WORKER_OWNED_LIMITED_COMPANY

    .eu
    • EU_COUNTRY_OF_CITIZENSHIP

    .fi
    • BIRTH_DATE_IN_YYYY_MM_DD

    • FI_BUSINESS_NUMBER

    • FI_ID_NUMBER

    • FI_NATIONALITY

      Valid values include the following:

      • FINNISH

      • NOT_FINNISH

    • FI_ORGANIZATION_TYPE

      Valid values include the following:

      • COMPANY

      • CORPORATION

      • GOVERNMENT

      • INSTITUTION

      • POLITICAL_PARTY

      • PUBLIC_COMMUNITY

      • TOWNSHIP

    .it
    • IT_NATIONALITY

    • IT_PIN

    • IT_REGISTRANT_ENTITY_TYPE

      Valid values include the following:

      • FOREIGNERS

      • FREELANCE_WORKERS (Freelance workers and professionals)

      • ITALIAN_COMPANIES (Italian companies and one-person companies)

      • NON_PROFIT_ORGANIZATIONS

      • OTHER_SUBJECTS

      • PUBLIC_ORGANIZATIONS

    .ru
    • BIRTH_DATE_IN_YYYY_MM_DD

    • RU_PASSPORT_DATA

    .se
    • BIRTH_COUNTRY

    • SE_ID_NUMBER

    .sg
    • SG_ID_NUMBER

    .uk, .co.uk, .me.uk, and .org.uk
    • UK_CONTACT_TYPE

      Valid values include the following:

      • CRC (UK Corporation by Royal Charter)

      • FCORP (Non-UK Corporation)

      • FIND (Non-UK Individual, representing self)

      • FOTHER (Non-UK Entity that does not fit into any other category)

      • GOV (UK Government Body)

      • IND (UK Individual (representing self))

      • IP (UK Industrial/Provident Registered Company)

      • LLP (UK Limited Liability Partnership)

      • LTD (UK Limited Company)

      • OTHER (UK Entity that does not fit into any other category)

      • PLC (UK Public Limited Company)

      • PTNR (UK Partnership)

      • RCHAR (UK Registered Charity)

      • SCH (UK School)

      • STAT (UK Statutory Body)

      • STRA (UK Sole Trader)

    • UK_COMPANY_NUMBER

    In addition, many TLDs require a VAT_NUMBER.

    " }, "Value":{ "shape":"ExtraParamValue", @@ -1853,6 +1853,10 @@ "type":"string", "enum":["SubmittedDate"] }, + "ListPricesPageMaxItems":{ + "type":"integer", + "max":1000 + }, "ListPricesRequest":{ "type":"structure", "members":{ @@ -1865,7 +1869,7 @@ "documentation":"

    For an initial request for a list of prices, omit this element. If the number of prices that are not yet complete is greater than the value that you specified for MaxItems, you can use Marker to return additional prices. Get the value of NextPageMarker from the previous response, and submit another request that includes the value of NextPageMarker in the Marker element.

    Used only for all TLDs. If you specify a TLD, don't specify a Marker.

    " }, "MaxItems":{ - "shape":"PageMaxItems", + "shape":"ListPricesPageMaxItems", "documentation":"

    Number of Prices to be returned.

    Used only for all TLDs. If you specify a TLD, don't specify a MaxItems.

    " } } @@ -2398,7 +2402,7 @@ }, "PrivacyProtectAdminContact":{ "shape":"Boolean", - "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

    You must specify the same privacy setting for the administrative, registrant, and technical contacts.

    Default: true

    " + "documentation":"

    Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information for the registrar, the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain name> owner.\".

    While some domains may allow different privacy settings per contact, we recommend specifying the same privacy setting for all contacts.

    Default: true

    " }, "PrivacyProtectRegistrantContact":{ "shape":"Boolean", @@ -2531,7 +2535,7 @@ }, "Consent":{ "shape":"Consent", - "documentation":"

    Customer's consent for the owner change request.

    " + "documentation":"

    Customer's consent for the owner change request. Required if the domain is not free (consent price is more than $0.00).

    " } }, "documentation":"

    The UpdateDomainContact request includes the following elements.

    " diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index 33fef24a8dbb..6e264dae18f7 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index 224b27b1405c..92090003921f 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index d534bc2bbab7..2cc688f2583c 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index 8621f6b73047..2da7aabc094e 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/rum/pom.xml b/services/rum/pom.xml index df2a2a6782eb..7f5a280c2d7c 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/s3/pom.xml b/services/s3/pom.xml index 4eb871c19a4a..96629adef4a1 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json index dd599bc3414c..f1dc6f77dccf 100644 --- a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json @@ -2628,6 +2628,1366 @@ "Accelerate": false } }, + { + "documentation": "non-bucket endpoint with FIPS: TODO(descriptive)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "FIPS + dualstack + custom endpoint TODO(descriptive)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "dualstack + custom endpoint TODO(descriptive)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "custom endpoint without FIPS/dualstack", + "expect": { + "endpoint": { + 
"properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://beta.example.com:1234/path" + } + }, + "params": { + "Region": "us-west-2", + "Endpoint": "http://beta.example.com:1234/path", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "s3 object lambda with access points disabled", + "expect": { + "error": "Access points are not supported for this operation" + }, + "params": { + "Region": "us-west-2", + "Bucket": "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint:myendpoint", + "DisableAccessPoints": true + } + }, + { + "documentation": "non bucket + FIPS", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "standard non bucket endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "non bucket endpoint with FIPS + Dualstack", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.dualstack.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "non bucket endpoint with dualstack", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { 
+ "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.dualstack.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "use global endpoint + IP address endpoint override", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://127.0.0.1/bucket" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "http://127.0.0.1", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "non-dns endpoint + global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": false, + "UseDualStack": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "endpoint override + use global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": false, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "FIPS + dualstack + non-bucket endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": 
"https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "FIPS + dualstack + non-DNS endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "endpoint override + FIPS + dualstack (BUG)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "endpoint override + non-dns bucket + FIPS (BUG)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "FIPS + bucket endpoint + force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": 
true, + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "bucket + FIPS + force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "ForcePathStyle": true, + "UseFIPS": true, + "UseDualStack": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "FIPS + dualstack + use global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://bucket.s3-fips.dualstack.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "UseFIPS": true, + "UseDualStack": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "URI encoded bucket + use global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "https://foo.com" + } + }, + { + "documentation": "FIPS + path based endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "UseFIPS": true, + "UseDualStack": false, + "Accelerate": false, + "UseGlobalEndpoint": 
true + } + }, + { + "documentation": "accelerate + dualstack + global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://bucket.s3-accelerate.dualstack.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket", + "UseFIPS": false, + "UseDualStack": true, + "Accelerate": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "dualstack + global endpoint + non URI safe bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "Accelerate": false, + "UseDualStack": true, + "UseFIPS": false, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "FIPS + uri encoded bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "Accelerate": false, + "UseDualStack": false, + "UseFIPS": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "endpoint override + non-uri safe endpoint + force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "ForcePathStyle": true, + "Accelerate": false, + "UseDualStack": false, + "UseFIPS": true, + 
"Endpoint": "http://foo.com", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "FIPS + Dualstack + global endpoint + non-dns bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-1", + "Bucket": "bucket!", + "Accelerate": false, + "UseDualStack": true, + "UseFIPS": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "endpoint override + FIPS + dualstack (this is wrong—it's a bug in the UseGlobalEndpoint branch)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": true, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "non-bucket endpoint override + dualstack + global endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "Endpoint override + UseGlobalEndpoint + us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true, + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + 
"documentation": "non-FIPS partition with FIPS set + custom endpoint", + "expect": { + "error": "Partition does not support FIPS" + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false, + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "aws-global signs as us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseFIPS": true, + "Accelerate": false, + "UseDualStack": true + } + }, + { + "documentation": "aws-global signs as us-east-1", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket.foo.com" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket", + "UseDualStack": false, + "UseFIPS": false, + "Accelerate": false, + "Endpoint": "https://foo.com" + } + }, + { + "documentation": "aws-global + dualstack + path-only bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseDualStack": true, + "UseFIPS": false, + "Accelerate": false + } + }, + { + "documentation": "aws-global + path-only bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.amazonaws.com/bucket%21" + } + }, + "params": { + 
"Region": "aws-global", + "Bucket": "bucket!" + } + }, + { + "documentation": "aws-global + fips + custom endpoint (TODO: should be an error)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseDualStack": false, + "UseFIPS": true, + "Accelerate": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "aws-global, endpoint override & path only-bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseDualStack": false, + "UseFIPS": false, + "Accelerate": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "aws-global + dualstack + custom endpoint (TODO: should be an error)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "aws-global", + "UseDualStack": true, + "UseFIPS": false, + "Accelerate": false, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "accelerate, dualstack + aws-global", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket.s3-accelerate.dualstack.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket", + "UseDualStack": true, + "UseFIPS": false, + "Accelerate": true + } + }, + { + "documentation": "FIPS + 
aws-global + path only bucket. TODO: this should be an error", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.dualstack.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseDualStack": true, + "UseFIPS": true, + "Accelerate": false + } + }, + { + "documentation": "aws-global + FIPS + endpoint override. TODO: should this be an error?", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "force path style, aws-global & endpoint override", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "ForcePathStyle": true, + "UseFIPS": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "ip address causes path style to be forced", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://192.168.1.1/bucket" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket", + "Endpoint": "http://192.168.1.1" + } + }, + { + "documentation": "endpoint override with aws-global region", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + 
"disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": true, + "UseDualStack": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "FIPS + path-only (TODO: consider making this an error)", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-1", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-fips.us-east-1.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket!", + "UseFIPS": true + } + }, + { + "documentation": "empty arn type", + "expect": { + "error": "Invalid ARN: No ARN type specified" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:not-s3:us-west-2:123456789012::myendpoint" + } + }, + { + "documentation": "path style can't be used with accelerate", + "expect": { + "error": "Path-style addressing cannot be used with S3 Accelerate" + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "Accelerate": true + } + }, + { + "documentation": "invalid region", + "expect": { + "error": "Invalid region: region was not a valid DNS name." + }, + "params": { + "Region": "us-east-2!", + "Bucket": "bucket.subdomain", + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "invalid region", + "expect": { + "error": "Invalid region: region was not a valid DNS name." 
+ }, + "params": { + "Region": "us-east-2!", + "Bucket": "bucket", + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "empty arn type", + "expect": { + "error": "Invalid Access Point Name" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3::123456789012:accesspoint:my_endpoint" + } + }, + { + "documentation": "empty arn type", + "expect": { + "error": "Client was configured for partition `aws` but ARN (`arn:aws:s3:cn-north-1:123456789012:accesspoint:my-endpoint`) has `aws-cn`" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3:cn-north-1:123456789012:accesspoint:my-endpoint", + "UseArnRegion": true + } + }, + { + "documentation": "invalid arn region", + "expect": { + "error": "Invalid region in ARN: `us-east_2` (invalid DNS name)" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-object-lambda:us-east_2:123456789012:accesspoint:my-endpoint", + "UseArnRegion": true + } + }, + { + "documentation": "invalid ARN outpost", + "expect": { + "error": "Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `op_01234567890123456`" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op_01234567890123456/accesspoint/reports", + "UseArnRegion": true + } + }, + { + "documentation": "invalid ARN", + "expect": { + "error": "Invalid ARN: expected an access point name" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/reports" + } + }, + { + "documentation": "invalid ARN", + "expect": { + "error": "Invalid ARN: Expected a 4-component resource" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Expected an outpost type `accesspoint`, found not-accesspoint" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/not-accesspoint/reports" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Invalid region in ARN: `us-east_1` (invalid DNS name)" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east_1:123456789012:outpost/op-01234567890123456/not-accesspoint/reports" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `12345_789012`" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:12345_789012:outpost/op-01234567890123456/not-accesspoint/reports" + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "error": "Invalid ARN: The Outpost Id was not set" + }, + "params": { + "Region": "us-east-2", + "Bucket": "arn:aws:s3-outposts:us-east-1:12345789012:outpost" + } + }, + { + "documentation": "use global endpoint virtual addressing", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://bucket.example.com" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket", + "Endpoint": "http://example.com", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "global endpoint + ip address", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://192.168.0.1/bucket" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket", + "Endpoint": "http://192.168.0.1", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3.us-east-2.amazonaws.com/bucket%21" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "UseGlobalEndpoint": true + } + }, + { + "documentation": "invalid outpost type", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket.s3-accelerate.amazonaws.com" + } + }, + "params": { + 
"Region": "us-east-2", + "Bucket": "bucket", + "Accelerate": true, + "UseGlobalEndpoint": true + } + }, + { + "documentation": "use global endpoint + custom endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "UseGlobalEndpoint": true, + "Endpoint": "http://foo.com" + } + }, + { + "documentation": "use global endpoint, not us-east-1, force path style", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingRegion": "us-east-2", + "name": "sigv4", + "signingName": "s3", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://foo.com/bucket%21" + } + }, + "params": { + "Region": "us-east-2", + "Bucket": "bucket!", + "UseGlobalEndpoint": true, + "ForcePathStyle": true, + "Endpoint": "http://foo.com" + } + }, { "documentation": "vanilla virtual addressing@us-west-2", "expect": { @@ -6495,6 +7855,122 @@ "UseDualStack": false, "Accelerate": false } + }, + { + "documentation": "S3 Outposts Abba - No endpoint set for beta", + "expect": { + "error": "Expected a endpoint to be specified but no endpoint was found" + }, + "params": { + "Region": "us-east-1", + "Bucket": "test-accessp-e0b1d075431d83bebde8xz5w8ijx1qzlbp3i3ebeta0--op-s3", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow with bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://10.0.1.12:433/bucketName" + } + }, + "params": { + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow 
without bucket", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://10.0.1.12:433" + } + }, + "params": { + "Region": "snow", + "Endpoint": "https://10.0.1.12:433", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow no port", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "http://10.0.1.12/bucketName" + } + }, + "params": { + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "http://10.0.1.12", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, + { + "documentation": "S3 Snow dns endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "snow", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://amazonaws.com/bucketName" + } + }, + "params": { + "Region": "snow", + "Bucket": "bucketName", + "Endpoint": "https://amazonaws.com", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } } ], "version": "1.0" diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index ad4987a2e605..4b91bba4672c 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

    Creates a copy of an object that is already stored in Amazon S3.

    You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

    All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return the error).

    If the copy is successful, you receive a response with information about the copied object.

    If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

    The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

    Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

    Metadata

    When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

    To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

    x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.

    x-amz-copy-source-if Headers

    To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:

    • x-amz-copy-source-if-match

    • x-amz-copy-source-if-none-match

    • x-amz-copy-source-if-unmodified-since

    • x-amz-copy-source-if-modified-since

    If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

    • x-amz-copy-source-if-match condition evaluates to true

    • x-amz-copy-source-if-unmodified-since condition evaluates to false

    If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

    • x-amz-copy-source-if-none-match condition evaluates to false

    • x-amz-copy-source-if-modified-since condition evaluates to true

    All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

    Server-side encryption

    Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

    When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.

    If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

    Access Control List (ACL)-Specific Request Headers

    When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

    If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.

    For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

    If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

    Checksums

    When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you may optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.

    Storage Class Options

    You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.

    If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more information, see Copying Objects.

    Versioning

    By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

    If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

    If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

    The following operations are related to CopyObject:

    ", + "documentation":"

    Creates a copy of an object that is already stored in Amazon S3.

    You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

    All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throws an exception (or, for the SDKs that don't use exceptions, they return the error).

    If the copy is successful, you receive a response with information about the copied object.

    If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

    The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

    Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

    Metadata

    When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

    To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

    x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.

    x-amz-copy-source-if Headers

    To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:

    • x-amz-copy-source-if-match

    • x-amz-copy-source-if-none-match

    • x-amz-copy-source-if-unmodified-since

    • x-amz-copy-source-if-modified-since

    If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

    • x-amz-copy-source-if-match condition evaluates to true

    • x-amz-copy-source-if-unmodified-since condition evaluates to false

    If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

    • x-amz-copy-source-if-none-match condition evaluates to false

    • x-amz-copy-source-if-modified-since condition evaluates to true

    All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

    Server-side encryption

    Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

    When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.

    If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

    Access Control List (ACL)-Specific Request Headers

    When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

    If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.

    For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

    If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

    Checksums

    When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.

    Storage Class Options

    You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.

    If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more information, see Copying Objects.

    Versioning

    By default, x-amz-copy-source header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

    If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

    If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

    The following operations are related to CopyObject:

    ", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -67,7 +67,7 @@ {"shape":"BucketAlreadyOwnedByYou"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", - "documentation":"

    Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

    Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.

    If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

    By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.

    If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.

    Access control lists (ACLs)

    When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or groups that should be granted specific permissions on the bucket.

    If your CreateBucket request sets bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, see Controlling object ownership in the Amazon S3 User Guide.

    There are two ways to grant the appropriate permissions using the request headers.

    • Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an Amazon Web Services account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an Amazon Web Services account

        Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

      x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Permissions

    In addition to s3:CreateBucket, the following permissions are required when your CreateBucket includes specific headers:

    • ACLs - If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed.

    • Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

    • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is required.

    The following operations are related to CreateBucket:

    ", + "documentation":"

    Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

    Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.

    If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

    By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.

    If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.

    Permissions

    In addition to s3:CreateBucket, the following permissions are required when your CreateBucket request includes specific headers:

    • Access control lists (ACLs) - If your CreateBucket request specifies access control list (ACL) permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket request is private or if the request doesn't specify any ACLs, only s3:CreateBucket permission is needed.

    • Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

    • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required. By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership setting, you can use the x-amz-object-ownership header in your CreateBucket request to set the ObjectOwnership setting of your choice. For more information about S3 Object Ownership, see Controlling object ownership in the Amazon S3 User Guide.

    • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. By default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent exposure of your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

    If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide.

    The following operations are related to CreateBucket:

    ", "alias":"PutBucket", "staticContextParams":{ "DisableAccessPoints":{"value":true} @@ -115,7 +115,7 @@ }, "input":{"shape":"DeleteBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", - "documentation":"

    Deletes the cors configuration information set for the bucket.

    To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

    For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

    The following operations are related to DeleteBucketCors:

    " + "documentation":"

    Deletes the cors configuration information set for the bucket.

    To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

    For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

    Related Resources

    " }, "DeleteBucketEncryption":{ "name":"DeleteBucketEncryption", @@ -516,7 +516,7 @@ {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"

    Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

    An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

    To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

    For more information about returning the ACL of an object, see GetObjectAcl.

    If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects.

    Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

    Permissions

    You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

    Versioning

    By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

    • If you supply a versionId, you need the s3:GetObjectVersion permission to access a specific version of an object. If you request a specific version, you do not need to have the s3:GetObject permission. If you request the current version without a specific version ID, only s3:GetObject permission is required. s3:GetObjectVersion permission won't be required.

    • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    For more information about versioning, see PutBucketVersioning.

    Overriding Response Header Values

    There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

    You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

    You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

    • response-content-type

    • response-content-language

    • response-expires

    • response-cache-control

    • response-content-disposition

    • response-content-encoding

    Additional Considerations about Request Headers

    If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

    If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    The following operations are related to GetObject:

    ", + "documentation":"

    Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

    An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

    To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

    For more information about returning the ACL of an object, see GetObjectAcl.

    If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects.

    Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

    Permissions

    You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error.

    If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.

    Versioning

    By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

    • If you supply a versionId, you need the s3:GetObjectVersion permission to access a specific version of an object. If you request a specific version, you do not need to have the s3:GetObject permission. If you request the current version without a specific version ID, only s3:GetObject permission is required. s3:GetObjectVersion permission won't be required.

    • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    For more information about versioning, see PutBucketVersioning.

    Overriding Response Header Values

    There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

    You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

    You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

    • response-content-type

    • response-content-language

    • response-expires

    • response-cache-control

    • response-content-disposition

    • response-content-encoding

    Additional Considerations about Request Headers

    If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

    If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns the 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    The following operations are related to GetObject:

    ", "httpChecksum":{ "requestValidationModeMember":"ChecksumMode", "responseAlgorithms":[ @@ -640,7 +640,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", - "documentation":"

    The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

    A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exact exception beyond these error codes.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

    • The last modified property in this case is the creation date of the object.

    Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

    Consider the following when using request headers:

    • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

      • If-Match condition evaluates to true, and;

      • If-Unmodified-Since condition evaluates to false;

      Then Amazon S3 returns 200 OK and the data requested.

    • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

      • If-None-Match condition evaluates to false, and;

      • If-Modified-Since condition evaluates to true;

      Then Amazon S3 returns the 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    Permissions

    You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.

    The following actions are related to HeadObject:

    " + "documentation":"

    The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

    A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exact exception beyond these error codes.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    • Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request error.

    • The last modified property in this case is the creation date of the object.

    Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

    Consider the following when using request headers:

    • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

      • If-Match condition evaluates to true, and;

      • If-Unmodified-Since condition evaluates to false;

      Then Amazon S3 returns 200 OK and the data requested.

    • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

      • If-None-Match condition evaluates to false, and;

      • If-Modified-Since condition evaluates to true;

      Then Amazon S3 returns the 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    Permissions

    You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 error.

    The following actions are related to HeadObject:

    " }, "ListBucketAnalyticsConfigurations":{ "name":"ListBucketAnalyticsConfigurations", @@ -776,7 +776,7 @@ }, "input":{"shape":"PutBucketAclRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "documentation":"

    Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

    You can use one of the following two ways to set a bucket's permissions:

    • Specify the ACL in the request body

    • Specify permissions using request headers

    You cannot specify access permission using both the body and the request headers.

    Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

    If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

    Permissions

    You can set access permissions using one of the following methods:

    • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an Amazon Web Services account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an Amazon Web Services account

        Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their email addresses.

      x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>&</Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    The following operations are related to PutBucketAcl:

    ", + "documentation":"

    Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

    You can use one of the following two ways to set a bucket's permissions:

    • Specify the ACL in the request body

    • Specify permissions using request headers

    You cannot specify access permission using both the body and the request headers.

    Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

    If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

    Permissions

    You can set access permissions by using one of the following methods:

    • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • id – if the value specified is the canonical user ID of an Amazon Web Services account

      • uri – if you are granting permissions to a predefined group

      • emailAddress – if the value specified is the email address of an Amazon Web Services account

        Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • Europe (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

      For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their email addresses.

      x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>&</Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

      Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

    The following operations are related to PutBucketAcl:

    ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -812,7 +812,7 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

    This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.

    By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default encryption, see Amazon S3 bucket default encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

    This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).

    To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

    The following operations are related to PutBucketEncryption:

    ", + "documentation":"

    This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.

    By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided keys (SSE-C). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default encryption, see Amazon S3 bucket default encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

    This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).

    To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

    The following operations are related to PutBucketEncryption:

    ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -858,7 +858,7 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

    Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.

    Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

    Rules

    You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the following:

    • Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

    • Status whether the rule is in effect.

    • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

    For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

    Permissions

    By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

    You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

    • s3:DeleteObject

    • s3:DeleteObjectVersion

    • s3:PutLifecycleConfiguration

    For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

    The following operations are related to PutBucketLifecycleConfiguration:

    ", + "documentation":"

    Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.

    Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

    Rules

    You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the following:

    • A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

    • A status indicating whether the rule is in effect.

    • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

    For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

    Permissions

    By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

    You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

    • s3:DeleteObject

    • s3:DeleteObjectVersion

    • s3:PutLifecycleConfiguration

    For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

    The following operations are related to PutBucketLifecycleConfiguration:

    ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -872,7 +872,7 @@ }, "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "documentation":"

    Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

    The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

    If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request.

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

    <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

    For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

    For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

    The following operations are related to PutBucketLogging:

    ", + "documentation":"

    Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

    The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

    If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (by using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request.

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GETObjectAcl request, appears as the CanonicalUser.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

    <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

    For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

    For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

    The following operations are related to PutBucketLogging:

    ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -885,7 +885,7 @@ "requestUri":"/{Bucket}?metrics" }, "input":{"shape":"PutBucketMetricsConfigurationRequest"}, - "documentation":"

    Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

    To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to PutBucketMetricsConfiguration:

    GetBucketLifecycle has the following special error:

    • Error code: TooManyConfigurations

      • Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

      • HTTP Status Code: HTTP 400 Bad Request

    " + "documentation":"

    Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

    To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to PutBucketMetricsConfiguration:

    PutBucketMetricsConfiguration has the following special error:

    • Error code: TooManyConfigurations

      • Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

      • HTTP Status Code: HTTP 400 Bad Request

    " }, "PutBucketNotification":{ "name":"PutBucketNotification", @@ -909,7 +909,7 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationConfigurationRequest"}, - "documentation":"

    Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

    Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

    By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

    <NotificationConfiguration>

    </NotificationConfiguration>

    This action replaces the existing notification configuration with the configuration you include in the request body.

    After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

    You can disable notifications by adding the empty NotificationConfiguration element.

    For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference.

    By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

    The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.

    If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

    The following action is related to PutBucketNotificationConfiguration:

    " + "documentation":"

    Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

    Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

    By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

    <NotificationConfiguration>

    </NotificationConfiguration>

    This action replaces the existing notification configuration with the configuration you include in the request body.

    After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

    You can disable notifications by adding the empty NotificationConfiguration element.

    For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference.

    By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with the required s3:PutBucketNotification permission.

    The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.

    If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

    The following action is related to PutBucketNotificationConfiguration:

    " }, "PutBucketOwnershipControls":{ "name":"PutBucketOwnershipControls", @@ -1013,7 +1013,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

    Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

    Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock.

    To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

    • To successfully complete the PutObject request, you must have the s3:PutObject in your IAM permissions.

    • To successfully change the objects acl of your PutObject request, you must have the s3:PutObjectAcl in your IAM permissions.

    • To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging in your IAM permissions.

    • The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

    You have three mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at by rest using server-side encryption with other key options. For more information, see Using Server-Side Encryption.

    When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

    If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

    If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

    By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

    If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

    For more information about related Amazon S3 APIs, see the following:

    ", + "documentation":"

    Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

    Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock.

    To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

    • To successfully complete the PutObject request, you must have the s3:PutObject permission in your IAM permissions.

    • To successfully change the object's ACL in your PutObject request, you must have the s3:PutObjectAcl permission in your IAM permissions.

    • To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging permission in your IAM permissions.

    • The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

    You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption.

    When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

    If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.

    If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.

    By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.

    If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

    For more information about related Amazon S3 APIs, see the following:

    ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1118,7 +1118,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

    Restores an archived copy of an object back into Amazon S3

    This action is not supported by Amazon S3 on Outposts.

    This action performs the following types of requests:

    • select - Perform a select query on an archived object

    • restore an archive - Restore an archived object

    For more information about the S3 structure in the request body, see the following:

    Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

    • The following expression returns all records from the specified object.

      SELECT * FROM Object

    • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

      SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

      SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

    When making a select request, you can also do the following:

    • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

    • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

    The following are additional important facts about the select feature:

    • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted-manually or through a lifecycle configuration.

    • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.

    • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

    Permissions

    To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

    Restoring objects

    Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

    To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

    When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body:

    • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

    • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

    You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

    To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

    After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

    If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.

    Responses

    A successful action returns either the 200 OK or 202 Accepted status code.

    • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

    • If the object is previously restored, Amazon S3 returns 200 OK in the response.

    • Special errors:

      • Code: RestoreAlreadyInProgress

      • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: GlacierExpeditedRetrievalNotAvailable

      • Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

      • HTTP Status Code: 503

      • SOAP Fault Code Prefix: N/A

    The following operations are related to RestoreObject:

    ", + "documentation":"

    Restores an archived copy of an object back into Amazon S3

    This action is not supported by Amazon S3 on Outposts.

    This action performs the following types of requests:

    • select - Perform a select query on an archived object

    • restore an archive - Restore an archived object

    For more information about the S3 structure in the request body, see the following:

    Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

    • The following expression returns all records from the specified object.

      SELECT * FROM Object

    • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

      SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

      SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

    When making a select request, you can also do the following:

    • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

    • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

    The following are additional important facts about the select feature:

    • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted-manually or through a lifecycle configuration.

    • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.

    • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

    Permissions

    To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

    Restoring objects

    Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

    To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

    When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body:

    • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

    • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

    You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

    To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

    After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request—there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

    If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.

    Responses

    A successful action returns either the 200 OK or 202 Accepted status code.

    • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

    • If the object is previously restored, Amazon S3 returns 200 OK in the response.

    • Special errors:

      • Code: RestoreAlreadyInProgress

      • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: GlacierExpeditedRetrievalNotAvailable

      • Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

      • HTTP Status Code: 503

      • SOAP Fault Code Prefix: N/A

    The following operations are related to RestoreObject:

    ", "alias":"PostObjectRestore", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1137,7 +1137,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"

    This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

    This action is not supported by Amazon S3 on Outposts.

    For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.

    Permissions

    You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

    Object Data Formats

    You can use Amazon S3 Select to query objects that have the following format properties:

    • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

    • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

    • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

    • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

      For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

      For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.

    Working with the Response Body

    Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

    GetObject Support

    The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

    • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

    • GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information, about storage classes see Storage Classes in the Amazon S3 User Guide.

    Special Errors

    For a list of special errors for this operation, see List of SELECT Object Content Error Codes

    The following operations are related to SelectObjectContent:

    " + "documentation":"

    This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

    This action is not supported by Amazon S3 on Outposts.

    For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.

    Permissions

    You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

    Object Data Formats

    You can use Amazon S3 Select to query objects that have the following format properties:

    • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

    • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

    • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

    • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

      For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

      For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.

    Working with the Response Body

    Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

    GetObject Support

    The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

    • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

    • The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide.

    Special Errors

    For a list of special errors for this operation, see List of SELECT Object Content Error Codes

    The following operations are related to SelectObjectContent:

    " }, "UploadPart":{ "name":"UploadPart", @@ -1782,7 +1782,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -1794,13 +1794,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2010,7 +2010,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2028,7 +2028,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2040,7 +2040,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2195,7 +2195,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2231,7 +2231,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the Amazon Web Services KMS key ID to use for object encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    ", + "documentation":"

    Specifies the KMS key ID to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2243,7 +2243,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

    ", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2502,7 +2502,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2520,7 +2520,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2532,7 +2532,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2643,7 +2643,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2679,7 +2679,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    ", + "documentation":"

    Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2691,7 +2691,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

    ", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2769,7 +2769,7 @@ "members":{ "Objects":{ "shape":"ObjectIdentifierList", - "documentation":"

    The objects to delete.

    ", + "documentation":"

    The object to delete.

    ", "locationName":"Object" }, "Quiet":{ @@ -3397,7 +3397,7 @@ }, "KMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If the encryption type is aws:kms, this optional value specifies the ID of the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.

    " + "documentation":"

    If the encryption type is aws:kms, this optional value specifies the ID of the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in KMS in the Amazon Web Services Key Management Service Developer Guide.

    " }, "KMSContext":{ "shape":"KMSContext", @@ -3591,6 +3591,11 @@ "Status":{ "shape":"BucketAccelerateStatus", "documentation":"

    The accelerate configuration of the bucket.

    " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -3610,6 +3615,11 @@ "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

    ", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -4648,7 +4658,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -4672,13 +4682,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -5270,7 +5280,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -5294,13 +5304,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -6196,6 +6206,11 @@ "EncodingType":{ "shape":"EncodingType", "documentation":"

    Encoding type used by Amazon S3 to encode object keys in the response.

    If you specify encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.

    " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -6250,6 +6265,11 @@ "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

    ", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -6309,6 +6329,11 @@ "EncodingType":{ "shape":"EncodingType", "documentation":"

    Encoding type used by Amazon S3 to encode object key names in the XML response.

    If you specify encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.

    " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -6363,6 +6388,11 @@ "documentation":"

    The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied).

    ", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" } } }, @@ -6408,6 +6438,11 @@ "EncodingType":{ "shape":"EncodingType", "documentation":"

    Encoding type used by Amazon S3 to encode object keys in the response.

    " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -6515,6 +6550,11 @@ "StartAfter":{ "shape":"StartAfter", "documentation":"

    If StartAfter was sent with the request, it is included in the response.

    " + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" } } }, @@ -8637,7 +8677,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -8661,7 +8701,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If x-amz-server-side-encryption is has a valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -8673,7 +8713,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -8825,7 +8865,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -8861,7 +8901,7 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If x-amz-server-side-encryption has a valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key to protect the data. If the KMS key does not exist in the same account issuing the command, you must use the full ARN and not just the ID.

    ", + "documentation":"

    If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, @@ -8873,7 +8913,7 @@ }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

    ", + "documentation":"

    Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

    Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -9655,7 +9695,7 @@ "members":{ "KeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key to use for encrypting inventory reports.

    " + "documentation":"

    Specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key to use for encrypting inventory reports.

    " } }, "documentation":"

    Specifies the use of SSE-KMS to encrypt delivered inventory reports.

    ", @@ -9834,7 +9874,8 @@ "type":"string", "enum":[ "AES256", - "aws:kms" + "aws:kms", + "aws:kms:dsse" ] }, "ServerSideEncryptionByDefault":{ @@ -10206,7 +10247,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -10224,13 +10265,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, @@ -10373,7 +10414,7 @@ "members":{ "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", + "documentation":"

    The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

    ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -10421,13 +10462,13 @@ }, "SSEKMSKeyId":{ "shape":"SSEKMSKeyId", - "documentation":"

    If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key was used for the object.

    ", + "documentation":"

    If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

    ", "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, "BucketKeyEnabled":{ "shape":"BucketKeyEnabled", - "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Amazon Web Services KMS (SSE-KMS).

    ", + "documentation":"

    Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    ", "location":"header", "locationName":"x-amz-server-side-encryption-bucket-key-enabled" }, diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index dd160776993e..b5da7af558a2 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index b8eb776dfb3d..ddf08293a83f 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 92064e2c8020..41cb7172deff 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 23671aa4ca79..33d19e91f155 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -137,7 +137,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

    Creates an Autopilot job.

    Find the best-performing model after you run an Autopilot job by calling DescribeAutoMLJob.

    For information about how to use Autopilot, see Automate Model Development with Amazon SageMaker Autopilot.

    " + "documentation":"

    Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

    Find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

    CreateAutoMLJob only accepts tabular input data. We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification.

    Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

    " }, "CreateAutoMLJobV2":{ "name":"CreateAutoMLJobV2", @@ -151,7 +151,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

    Creates an Amazon SageMaker AutoML job that uses non-tabular data such as images or text for Computer Vision or Natural Language Processing problems.

    Find the resulting model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

    To create an AutoMLJob using tabular data, see CreateAutoMLJob.

    This API action is callable through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an error.

    " + "documentation":"

    Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

    We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification.

    Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

    For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.

    Find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. Calling DescribeAutoMLJob on a AutoML job V2 results in an error.

    " }, "CreateCodeRepository":{ "name":"CreateCodeRepository", @@ -1406,7 +1406,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

    Returns information about an Amazon SageMaker AutoML job.

    " + "documentation":"

    Returns information about an AutoML job created by calling CreateAutoMLJob.

    " }, "DescribeAutoMLJobV2":{ "name":"DescribeAutoMLJobV2", @@ -1419,7 +1419,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

    Returns information about an Amazon SageMaker AutoML V2 job.

    This API action is callable through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an error.

    " + "documentation":"

    Returns information about an AutoML job V2 created by calling CreateAutoMLJobV2.

    " }, "DescribeCodeRepository":{ "name":"DescribeCodeRepository", @@ -4787,7 +4787,7 @@ }, "InferenceContainerDefinitions":{ "shape":"AutoMLInferenceContainerDefinitions", - "documentation":"

    The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the V2 API only (for example, for jobs created by calling CreateAutoMLJobV2).

    " + "documentation":"

    The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the AutoML jobs V2 (for example, for jobs created by calling CreateAutoMLJobV2) related to image or text classification problem types only.

    " } }, "documentation":"

    Information about a candidate produced by an AutoML training job, including its status, steps, and other properties.

    " @@ -4920,7 +4920,7 @@ "documentation":"

    The validation fraction (optional) is a float that specifies the portion of the training dataset to be used for validation. The default value is 0.2, and values must be greater than 0 and less than 1. We recommend setting this value to be less than 0.5.

    " } }, - "documentation":"

    This structure specifies how to split the data into train and validation datasets.

    If you are using the V1 API (for example CreateAutoMLJob) or the V2 API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a TextClassificationJobConfig problem type), the validation and training datasets must contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 GB in size.

    " + "documentation":"

    This structure specifies how to split the data into train and validation datasets.

    The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

    " }, "AutoMLFailureReason":{ "type":"string", @@ -4974,29 +4974,29 @@ }, "ContentType":{ "shape":"ContentType", - "documentation":"

    The content type of the data from the input source. The following are the allowed content types for different problems:

    • ImageClassification: image/png, image/jpeg, image/*

    • TextClassification: text/csv;header=present

    " + "documentation":"

    The content type of the data from the input source. The following are the allowed content types for different problems:

    • For Tabular problem types: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.

    • For ImageClassification: image/png, image/jpeg, or image/*. The default value is image/*.

    • For TextClassification: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.

    " }, "CompressionType":{ "shape":"CompressionType", - "documentation":"

    The allowed compression types depend on the input format. We allow the compression type Gzip for S3Prefix inputs only. For all other inputs, the compression type should be None. If no compression type is provided, we default to None.

    " + "documentation":"

    The allowed compression types depend on the input format and problem type. We allow the compression type Gzip for S3Prefix inputs on tabular data only. For all other inputs, the compression type should be None. If no compression type is provided, we default to None.

    " }, "DataSource":{ "shape":"AutoMLDataSource", - "documentation":"

    The data source for an AutoML channel.

    " + "documentation":"

    The data source for an AutoML channel (Required).

    " } }, - "documentation":"

    A channel is a named input source that training algorithms can consume. This channel is used for the non tabular training data of an AutoML job using the V2 API. For tabular training data, see AutoMLChannel. For more information, see Channel.

    " + "documentation":"

    A channel is a named input source that training algorithms can consume. This channel is used for AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2).

    " }, "AutoMLJobCompletionCriteria":{ "type":"structure", "members":{ "MaxCandidates":{ "shape":"MaxCandidates", - "documentation":"

    The maximum number of times a training job is allowed to run.

    For V2 jobs (jobs created by calling CreateAutoMLJobV2), the supported value is 1.

    " + "documentation":"

    The maximum number of times a training job is allowed to run.

    For AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2), the supported value is 1.

    " }, "MaxRuntimePerTrainingJobInSeconds":{ "shape":"MaxRuntimePerTrainingJobInSeconds", - "documentation":"

    The maximum time, in seconds, that each training job executed inside hyperparameter tuning is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the CreateHyperParameterTuningJob action.

    For V2 jobs (jobs created by calling CreateAutoMLJobV2), this field controls the runtime of the job candidate.

    " + "documentation":"

    The maximum time, in seconds, that each training job executed inside hyperparameter tuning is allowed to run as part of a hyperparameter tuning job. For more information, see the StoppingCondition used by the CreateHyperParameterTuningJob action.

    For AutoML jobs V2 (jobs created by calling CreateAutoMLJobV2), this field controls the runtime of the job candidate.

    " }, "MaxAutoMLJobRuntimeInSeconds":{ "shape":"MaxAutoMLJobRuntimeInSeconds", @@ -5026,7 +5026,7 @@ }, "Mode":{ "shape":"AutoMLMode", - "documentation":"

    The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

    The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

    The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

    " + "documentation":"

    The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

    The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

    The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

    " } }, "documentation":"

    A collection of settings used for an AutoML job.

    " @@ -5049,10 +5049,10 @@ "members":{ "MetricName":{ "shape":"AutoMLMetricEnum", - "documentation":"

    The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset.

    For the list of all available metrics supported by Autopilot, see Autopilot metrics.

    If you do not specify a metric explicitly, the default behavior is to automatically use:

    • MSE: for regression.

    • F1: for binary classification

    • Accuracy: for multiclass classification.

    " + "documentation":"

    The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset.

    For the list of all available metrics supported by Autopilot, see Autopilot metrics.

    If you do not specify a metric explicitly, the default behavior is to automatically use:

    • For tabular problem types:

      • Regression: MSE.

      • Binary classification: F1.

      • Multiclass classification: Accuracy.

    • For image or text classification problem types: Accuracy.

    " } }, - "documentation":"

    Specifies a metric to minimize or maximize as the objective of a job. V2 API jobs (for example jobs created by calling CreateAutoMLJobV2), support Accuracy only.

    " + "documentation":"

    Specifies a metric to minimize or maximize as the objective of a job.

    " }, "AutoMLJobObjectiveType":{ "type":"string", @@ -5250,14 +5250,37 @@ "members":{ "ImageClassificationJobConfig":{ "shape":"ImageClassificationJobConfig", - "documentation":"

    Settings used to configure an AutoML job using the V2 API for the image classification problem type.

    " + "documentation":"

    Settings used to configure an AutoML job V2 for the image classification problem type.

    " }, "TextClassificationJobConfig":{ "shape":"TextClassificationJobConfig", - "documentation":"

    Settings used to configure an AutoML job using the V2 API for the text classification problem type.

    " + "documentation":"

    Settings used to configure an AutoML job V2 for the text classification problem type.

    " + }, + "TabularJobConfig":{ + "shape":"TabularJobConfig", + "documentation":"

    Settings used to configure an AutoML job V2 for a tabular problem type (regression, classification).

    " } }, - "documentation":"

    A collection of settings specific to the problem type used to configure an AutoML job using the V2 API. There must be one and only one config of the following type.

    ", + "documentation":"

    A collection of settings specific to the problem type used to configure an AutoML job V2. There must be one and only one config of the following type.

    ", + "union":true + }, + "AutoMLProblemTypeConfigName":{ + "type":"string", + "enum":[ + "ImageClassification", + "TextClassification", + "Tabular" + ] + }, + "AutoMLProblemTypeResolvedAttributes":{ + "type":"structure", + "members":{ + "TabularResolvedAttributes":{ + "shape":"TabularResolvedAttributes", + "documentation":"

    Defines the resolved attributes for the TABULAR problem type.

    " + } + }, + "documentation":"

    The resolved attributes specific to the problem type of an AutoML job V2.

    ", "union":true }, "AutoMLProcessingUnit":{ @@ -5267,6 +5290,18 @@ "GPU" ] }, + "AutoMLResolvedAttributes":{ + "type":"structure", + "members":{ + "AutoMLJobObjective":{"shape":"AutoMLJobObjective"}, + "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, + "AutoMLProblemTypeResolvedAttributes":{ + "shape":"AutoMLProblemTypeResolvedAttributes", + "documentation":"

    Defines the resolved attributes specific to a problem type.

    " + } + }, + "documentation":"

    The resolved attributes used to configure an AutoML job V2.

    " + }, "AutoMLS3DataSource":{ "type":"structure", "required":[ @@ -5679,6 +5714,16 @@ "type":"string", "min":1 }, + "CandidateGenerationConfig":{ + "type":"structure", + "members":{ + "AlgorithmsConfig":{ + "shape":"AutoMLAlgorithmsConfig", + "documentation":"

    Stores the configuration information for the selection of algorithms used to train model candidates on tabular data.

    The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode.

    • AlgorithmsConfig should not be set in AUTO training mode.

    • When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only.

      If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

    • When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode.

    For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

    For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

    " + } + }, + "documentation":"

    Stores the configuration information for how model candidates are generated using an AutoML job V2.

    " + }, "CandidateName":{ "type":"string", "max":64, @@ -7154,7 +7199,7 @@ }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", - "documentation":"

    Defines the objective metric used to measure the predictive quality of an AutoML job. You provide an AutoMLJobObjective$MetricName and Autopilot infers whether to minimize or maximize it. For CreateAutoMLJobV2, only Accuracy is supported.

    " + "documentation":"

    Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. See AutoMLJobObjective for the default values.

    " }, "AutoMLJobConfig":{ "shape":"AutoMLJobConfig", @@ -7204,7 +7249,7 @@ }, "AutoMLJobInputDataConfig":{ "shape":"AutoMLJobInputDataConfig", - "documentation":"

    An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to InputDataConfig supported by CreateAutoMLJob. The supported formats depend on the problem type:

    • ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile

    • TextClassification: S3Prefix

    " + "documentation":"

    An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to InputDataConfig supported by CreateAutoMLJob. The supported formats depend on the problem type:

    • For Tabular problem types: S3Prefix, ManifestFile.

    • For ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile.

    • For TextClassification: S3Prefix.

    " }, "OutputDataConfig":{ "shape":"AutoMLOutputDataConfig", @@ -7212,7 +7257,7 @@ }, "AutoMLProblemTypeConfig":{ "shape":"AutoMLProblemTypeConfig", - "documentation":"

    Defines the configuration settings of one of the supported problem types.

    " + "documentation":"

    Defines the configuration settings of one of the supported problem types.

    For tabular problem types, you must either specify the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType) and provide the AutoMLJobObjective, or none at all.

    " }, "RoleArn":{ "shape":"RoleArn", @@ -7228,7 +7273,7 @@ }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", - "documentation":"

    Specifies a metric to minimize or maximize as the objective of a job. For CreateAutoMLJobV2, only Accuracy is supported.

    " + "documentation":"

    Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see AutoMLJobObjective.

    For tabular problem types, you must either provide the AutoMLJobObjective and indicate the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or none.

    " }, "ModelDeployConfig":{ "shape":"ModelDeployConfig", @@ -7236,7 +7281,7 @@ }, "DataSplitConfig":{ "shape":"AutoMLDataSplitConfig", - "documentation":"

    This structure specifies how to split the data into train and validation datasets.

    If you are using the V1 API (for example CreateAutoMLJob) or the V2 API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a TextClassificationJobConfig problem type), the validation and training datasets must contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 GB in size.

    " + "documentation":"

    This structure specifies how to split the data into train and validation datasets.

    The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.

    " } } }, @@ -11089,7 +11134,7 @@ }, "ResolvedAttributes":{ "shape":"ResolvedAttributes", - "documentation":"

    Contains ProblemType, AutoMLJobObjective, and CompletionCriteria. If you do not provide these values, they are auto-inferred. If you do provide them, the values used are the ones you provide.

    " + "documentation":"

    Contains ProblemType, AutoMLJobObjective, and CompletionCriteria. If you do not provide these values, they are inferred.

    " }, "ModelDeployConfig":{ "shape":"ModelDeployConfig", @@ -11107,7 +11152,7 @@ "members":{ "AutoMLJobName":{ "shape":"AutoMLJobName", - "documentation":"

    Requests information about an AutoML V2 job using its unique name.

    " + "documentation":"

    Requests information about an AutoML job V2 using its unique name.

    " } } }, @@ -11127,11 +11172,11 @@ "members":{ "AutoMLJobName":{ "shape":"AutoMLJobName", - "documentation":"

    Returns the name of the AutoML V2 job.

    " + "documentation":"

    Returns the name of the AutoML job V2.

    " }, "AutoMLJobArn":{ "shape":"AutoMLJobArn", - "documentation":"

    Returns the Amazon Resource Name (ARN) of the AutoML V2 job.

    " + "documentation":"

    Returns the Amazon Resource Name (ARN) of the AutoML job V2.

    " }, "AutoMLJobInputDataConfig":{ "shape":"AutoMLJobInputDataConfig", @@ -11151,15 +11196,15 @@ }, "AutoMLProblemTypeConfig":{ "shape":"AutoMLProblemTypeConfig", - "documentation":"

    Returns the configuration settings of the problem type set for the AutoML V2 job.

    " + "documentation":"

    Returns the configuration settings of the problem type set for the AutoML job V2.

    " }, "CreationTime":{ "shape":"Timestamp", - "documentation":"

    Returns the creation time of the AutoML V2 job.

    " + "documentation":"

    Returns the creation time of the AutoML job V2.

    " }, "EndTime":{ "shape":"Timestamp", - "documentation":"

    Returns the end time of the AutoML V2 job.

    " + "documentation":"

    Returns the end time of the AutoML job V2.

    " }, "LastModifiedTime":{ "shape":"Timestamp", @@ -11167,11 +11212,11 @@ }, "FailureReason":{ "shape":"AutoMLFailureReason", - "documentation":"

    Returns the reason for the failure of the AutoML V2 job, when applicable.

    " + "documentation":"

    Returns the reason for the failure of the AutoML job V2, when applicable.

    " }, "PartialFailureReasons":{ "shape":"AutoMLPartialFailureReasons", - "documentation":"

    Returns a list of reasons for partial failures within an AutoML V2 job.

    " + "documentation":"

    Returns a list of reasons for partial failures within an AutoML job V2.

    " }, "BestCandidate":{ "shape":"AutoMLCandidate", @@ -11179,11 +11224,11 @@ }, "AutoMLJobStatus":{ "shape":"AutoMLJobStatus", - "documentation":"

    Returns the status of the AutoML V2 job.

    " + "documentation":"

    Returns the status of the AutoML job V2.

    " }, "AutoMLJobSecondaryStatus":{ "shape":"AutoMLJobSecondaryStatus", - "documentation":"

    Returns the secondary status of the AutoML V2 job.

    " + "documentation":"

    Returns the secondary status of the AutoML job V2.

    " }, "ModelDeployConfig":{ "shape":"ModelDeployConfig", @@ -11200,6 +11245,15 @@ "SecurityConfig":{ "shape":"AutoMLSecurityConfig", "documentation":"

    Returns the security configuration for traffic encryption or Amazon VPC settings.

    " + }, + "AutoMLJobArtifacts":{"shape":"AutoMLJobArtifacts"}, + "ResolvedAttributes":{ + "shape":"AutoMLResolvedAttributes", + "documentation":"

    Returns the resolved attributes used by the AutoML job V2.

    " + }, + "AutoMLProblemTypeConfigName":{ + "shape":"AutoMLProblemTypeConfigName", + "documentation":"

    Returns the name of the problem type configuration set for the AutoML job V2.

    " } } }, @@ -13807,7 +13861,7 @@ "members":{ "PipelineName":{ "shape":"PipelineNameOrArn", - "documentation":"

    The name of the pipeline to describe.

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the pipeline to describe.

    " } } }, @@ -17747,7 +17801,7 @@ }, "InstanceCount":{ "shape":"TrainingInstanceCount", - "documentation":"

    The number of instances of the type specified by InstanceType. Choose an instance count larger than 1 for distributed training algorithms. See SageMaker distributed training jobs for more information.

    " + "documentation":"

    The number of instances of the type specified by InstanceType. Choose an instance count larger than 1 for distributed training algorithms. See Step 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python SDK for more information.

    " }, "VolumeSizeInGB":{ "shape":"VolumeSizeInGB", @@ -18057,7 +18111,7 @@ "members":{ "InstanceType":{ "shape":"TrainingInstanceType", - "documentation":"

    The instance type used to run hyperparameter optimization tuning jobs. See descriptions of instance types for more information.

    " + "documentation":"

    The instance type used to run hyperparameter optimization tuning jobs. See descriptions of instance types for more information.

    " }, "InstanceCount":{ "shape":"TrainingInstanceCount", @@ -18203,7 +18257,7 @@ "documentation":"

    How long a job is allowed to run, or how many candidates a job is allowed to generate.

    " } }, - "documentation":"

    Stores the configuration information for the image classification problem of an AutoML job using the V2 API.

    " + "documentation":"

    Stores the configuration information for the image classification problem of an AutoML job V2.

    " }, "ImageConfig":{ "type":"structure", @@ -19772,7 +19826,7 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of AppImageConfigs to return in the response. The default value is 10.

    " + "documentation":"

    The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

    " }, "NextToken":{ "shape":"NextToken", @@ -19830,7 +19884,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    Returns a list up to a specified limit.

    " + "documentation":"

    The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

    " }, "SortOrder":{ "shape":"SortOrder", @@ -20425,7 +20479,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    Returns a list up to a specified limit.

    " + "documentation":"

    The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

    " } } }, @@ -22465,7 +22519,7 @@ "members":{ "PipelineName":{ "shape":"PipelineNameOrArn", - "documentation":"

    The name of the pipeline.

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the pipeline.

    " }, "CreatedAfter":{ "shape":"Timestamp", @@ -22699,7 +22753,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    Returns a list up to a specified limit.

    " + "documentation":"

    The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

    " }, "SortOrder":{ "shape":"SortOrder", @@ -22781,7 +22835,7 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of Studio Lifecycle Configurations to return in the response. The default value is 10.

    " + "documentation":"

    The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

    " }, "NextToken":{ "shape":"NextToken", @@ -22826,7 +22880,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    A token for getting the next set of actions, if there are any.

    " + "documentation":"

    If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

    " }, "StudioLifecycleConfigs":{ "shape":"StudioLifecycleConfigsList", @@ -23186,7 +23240,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    Returns a list up to a specified limit.

    " + "documentation":"

    The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value as part of a subsequent call. The default value is 10.

    " }, "SortOrder":{ "shape":"SortOrder", @@ -26396,7 +26450,7 @@ }, "TargetDevice":{ "shape":"TargetDevice", - "documentation":"

    Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform.

    " + "documentation":"

    Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform.

    Currently ml_trn1 is available only in US East (N. Virginia) Region, and ml_inf2 is available only in US East (Ohio) Region.

    " }, "TargetPlatform":{ "shape":"TargetPlatform", @@ -30346,7 +30400,7 @@ "members":{ "PipelineName":{ "shape":"PipelineNameOrArn", - "documentation":"

    The name of the pipeline.

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the pipeline.

    " }, "PipelineExecutionDisplayName":{ "shape":"PipelineExecutionName", @@ -30796,6 +30850,52 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "TabularJobConfig":{ + "type":"structure", + "required":["TargetAttributeName"], + "members":{ + "CandidateGenerationConfig":{ + "shape":"CandidateGenerationConfig", + "documentation":"

    The configuration information of how model candidates are generated.

    " + }, + "CompletionCriteria":{"shape":"AutoMLJobCompletionCriteria"}, + "FeatureSpecificationS3Uri":{ + "shape":"S3Uri", + "documentation":"

    A URL to the Amazon S3 data source containing selected features from the input data source to run an Autopilot job V2. You can input FeatureAttributeNames (optional) in JSON format as shown below:

    { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

    You can also specify the data type of the feature (optional) in the format shown below:

    { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }

    These column keys may not include the target column.

    In ensembling mode, Autopilot only supports the following data types: numeric, categorical, text, and datetime. In HPO mode, Autopilot can support numeric, categorical, text, datetime, and sequence.

    If only FeatureDataTypes is provided, the column keys (col1, col2,..) should be a subset of the column names in the input data.

    If both FeatureDataTypes and FeatureAttributeNames are provided, then the column keys should be a subset of the column names provided in FeatureAttributeNames.

    The key name FeatureAttributeNames is fixed. The values listed in [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings containing unique values that are a subset of the column names in the input data. The list of columns provided must not include the target column.

    " + }, + "Mode":{ + "shape":"AutoMLMode", + "documentation":"

    The method that Autopilot uses to train the data. You can either specify the mode manually or let Autopilot choose for you based on the dataset size by selecting AUTO. In AUTO mode, Autopilot chooses ENSEMBLING for datasets smaller than 100 MB, and HYPERPARAMETER_TUNING for larger ones.

    The ENSEMBLING mode uses a multi-stack ensemble model to predict classification and regression tasks directly from your dataset. This machine learning mode combines several base models to produce an optimal predictive model. It then uses a stacking ensemble method to combine predictions from contributing members. A multi-stack ensemble model can provide better performance over a single model by combining the predictive capabilities of multiple models. See Autopilot algorithm support for a list of algorithms supported by ENSEMBLING mode.

    The HYPERPARAMETER_TUNING (HPO) mode uses the best hyperparameters to train the best version of a model. HPO automatically selects an algorithm for the type of problem you want to solve. Then HPO finds the best hyperparameters according to your objective metric. See Autopilot algorithm support for a list of algorithms supported by HYPERPARAMETER_TUNING mode.

    " + }, + "GenerateCandidateDefinitionsOnly":{ + "shape":"GenerateCandidateDefinitionsOnly", + "documentation":"

    Generates possible candidates without training the models. A model candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

    " + }, + "ProblemType":{ + "shape":"ProblemType", + "documentation":"

    The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types.

    " + }, + "TargetAttributeName":{ + "shape":"TargetAttributeName", + "documentation":"

    The name of the target variable in supervised learning, usually represented by 'y'.

    " + }, + "SampleWeightAttributeName":{ + "shape":"SampleWeightAttributeName", + "documentation":"

    If specified, this column name indicates which column of the dataset should be treated as sample weights for use by the objective metric during the training, evaluation, and the selection of the best model. This column is not considered as a predictive feature. For more information on Autopilot metrics, see Metrics and validation.

    Sample weights should be numeric, non-negative, with larger values indicating which rows are more important than others. Data points that have invalid or no weight value are excluded.

    Support for sample weights is available in Ensembling mode only.

    " + } + }, + "documentation":"

    The collection of settings used by an AutoML job V2 for the TABULAR problem type.

    " + }, + "TabularResolvedAttributes":{ + "type":"structure", + "members":{ + "ProblemType":{ + "shape":"ProblemType", + "documentation":"

    The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see Amazon SageMaker Autopilot problem types.

    " + } + }, + "documentation":"

    The resolved attributes specific to the TABULAR problem type.

    " + }, "Tag":{ "type":"structure", "required":[ @@ -30854,6 +30954,8 @@ "ml_p3", "ml_g4dn", "ml_inf1", + "ml_inf2", + "ml_trn1", "ml_eia2", "jetson_tx1", "jetson_tx2", @@ -31035,14 +31137,14 @@ }, "ContentColumn":{ "shape":"ContentColumn", - "documentation":"

    The name of the column used to provide the sentences to be classified. It should not be the same as the target column.

    " + "documentation":"

    The name of the column used to provide the sentences to be classified. It should not be the same as the target column (Required).

    " }, "TargetLabelColumn":{ "shape":"TargetLabelColumn", - "documentation":"

    The name of the column used to provide the class labels. It should not be same as the content column.

    " + "documentation":"

    The name of the column used to provide the class labels. It should not be the same as the content column (Required).

    " } }, - "documentation":"

    Stores the configuration information for the text classification problem of an AutoML job using the V2 API.

    " + "documentation":"

    Stores the configuration information for the text classification problem of an AutoML job V2.

    " }, "ThingName":{ "type":"string", diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index 25ef98034253..1cdcf912889c 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index 7c85c0fe52c6..25c01128b970 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index 293329da203b..3b2799f8fbd0 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 5a275c77e6b7..95697a88d398 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index c67279a1394a..6b48812edec1 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 
fcda135570c8..099c75b029aa 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index a65afa1cbf09..59eb6d7b87cc 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index 069bb0838d5f..5be5e8ef4ba8 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 2e0454f9f6f3..d2c6d7a09657 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index d57b558f6efb..f82702293544 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 6a7aa36a4b2c..862c3453fcef 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json 
b/services/securityhub/src/main/resources/codegen-resources/service-2.json index 602ef04e9ae4..f2adc9ddd778 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -48,6 +48,23 @@ "deprecated":true, "deprecatedMessage":"This API has been deprecated, use AcceptAdministratorInvitation API instead." }, + "BatchDeleteAutomationRules":{ + "name":"BatchDeleteAutomationRules", + "http":{ + "method":"POST", + "requestUri":"/automationrules/delete" + }, + "input":{"shape":"BatchDeleteAutomationRulesRequest"}, + "output":{"shape":"BatchDeleteAutomationRulesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes one or more automation rules.

    " + }, "BatchDisableStandards":{ "name":"BatchDisableStandards", "http":{ @@ -80,6 +97,24 @@ ], "documentation":"

    Enables the standards specified by the provided StandardsArn. To obtain the ARN for a standard, use the DescribeStandards operation.

    For more information, see the Security Standards section of the Security Hub User Guide.

    " }, + "BatchGetAutomationRules":{ + "name":"BatchGetAutomationRules", + "http":{ + "method":"POST", + "requestUri":"/automationrules/get" + }, + "input":{"shape":"BatchGetAutomationRulesRequest"}, + "output":{"shape":"BatchGetAutomationRulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves a list of details for automation rules based on rule Amazon Resource Names (ARNs).

    " + }, "BatchGetSecurityControls":{ "name":"BatchGetSecurityControls", "http":{ @@ -128,6 +163,23 @@ ], "documentation":"

    Imports security findings generated by a finding provider into Security Hub. This action is requested by the finding provider to import its findings into Security Hub.

    BatchImportFindings must be called by one of the following:

    • The Amazon Web Services account that is associated with a finding if you are using the default product ARN or are a partner sending findings from within a customer's Amazon Web Services account. In these cases, the identifier of the account that you are calling BatchImportFindings from needs to be the same as the AwsAccountId attribute for the finding.

    • An Amazon Web Services account that Security Hub has allow-listed for an official partner integration. In this case, you can call BatchImportFindings from the allow-listed account and send findings from different customer accounts in the same batch.

    The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

    After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

    • Note

    • UserDefinedFields

    • VerificationState

    • Workflow

    Finding providers also should not use BatchImportFindings to update the following attributes.

    • Confidence

    • Criticality

    • RelatedFindings

    • Severity

    • Types

    Instead, finding providers use FindingProviderFields to provide values for these attributes.

    " }, + "BatchUpdateAutomationRules":{ + "name":"BatchUpdateAutomationRules", + "http":{ + "method":"PATCH", + "requestUri":"/automationrules/update" + }, + "input":{"shape":"BatchUpdateAutomationRulesRequest"}, + "output":{"shape":"BatchUpdateAutomationRulesResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Updates one or more automation rules based on rule Amazon Resource Names (ARNs) and input parameters.

    " + }, "BatchUpdateFindings":{ "name":"BatchUpdateFindings", "http":{ @@ -177,6 +229,23 @@ ], "documentation":"

    Creates a custom action target in Security Hub.

    You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.

    " }, + "CreateAutomationRule":{ + "name":"CreateAutomationRule", + "http":{ + "method":"POST", + "requestUri":"/automationrules/create" + }, + "input":{"shape":"CreateAutomationRuleRequest"}, + "output":{"shape":"CreateAutomationRuleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an automation rule based on input parameters.

    " + }, "CreateFindingAggregator":{ "name":"CreateFindingAggregator", "http":{ @@ -763,6 +832,23 @@ ], "documentation":"

    Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from.

    This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations.

    Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

    When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated from the member account.

    " }, + "ListAutomationRules":{ + "name":"ListAutomationRules", + "http":{ + "method":"GET", + "requestUri":"/automationrules/list" + }, + "input":{"shape":"ListAutomationRulesRequest"}, + "output":{"shape":"ListAutomationRulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalException"}, + {"shape":"InvalidAccessException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    A list of automation rules and their metadata for the calling account.

    " + }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", "http":{ @@ -1144,6 +1230,12 @@ }, "documentation":"

    Provides details about one of the following actions that affects or that was taken on a resource:

    • A remote IP address issued an Amazon Web Services API call

    • A DNS request was received

    • A remote IP address attempted to connect to an EC2 instance

    • A remote IP address attempted a port probe on an EC2 instance

    " }, + "ActionList":{ + "type":"list", + "member":{"shape":"AutomationRulesAction"}, + "max":1, + "min":1 + }, "ActionLocalIpDetails":{ "type":"structure", "members":{ @@ -1363,6 +1455,309 @@ "DEFAULT" ] }, + "AutomationRulesAction":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"AutomationRulesActionType", + "documentation":"

    Specifies that the rule action should update the Types finding field. The Types finding field provides one or more finding types in the format of namespace/category/classifier that classify a finding. For more information, see Types taxonomy for ASFF in the Security Hub User Guide.

    " + }, + "FindingFieldsUpdate":{ + "shape":"AutomationRulesFindingFieldsUpdate", + "documentation":"

    Specifies that the automation rule action is an update to a finding field.

    " + } + }, + "documentation":"

    One or more actions to update finding fields if a finding matches the defined criteria of the rule.

    " + }, + "AutomationRulesActionType":{ + "type":"string", + "enum":["FINDING_FIELDS_UPDATE"] + }, + "AutomationRulesArnsList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":1 + }, + "AutomationRulesConfig":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Resource Name (ARN) of a rule.

    " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

    Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub will apply the rule to findings and finding updates after the rule is created.

    " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

    An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

    " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the rule.

    " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

    A description of the rule.

    " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding.
 The default value of this field is false.

    " + }, + "Criteria":{ + "shape":"AutomationRulesFindingFilters", + "documentation":"

    A set of Amazon Web Services Security Finding Format finding field attributes and corresponding expected values that Security Hub uses to filter findings. If a finding matches the conditions specified in this parameter, Security Hub applies the rule action to the finding.

    " + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

    One or more actions to update finding fields if a finding matches the defined criteria of the rule.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that indicates when the rule was created.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that indicates when the rule was most recently updated.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "CreatedBy":{ + "shape":"NonEmptyString", + "documentation":"

    The principal that created a rule.

    " + } + }, + "documentation":"

    Defines the configuration of an automation rule.

    " + }, + "AutomationRulesConfigList":{ + "type":"list", + "member":{"shape":"AutomationRulesConfig"} + }, + "AutomationRulesFindingFieldsUpdate":{ + "type":"structure", + "members":{ + "Note":{"shape":"NoteUpdate"}, + "Severity":{"shape":"SeverityUpdate"}, + "VerificationState":{ + "shape":"VerificationState", + "documentation":"

    The rule action will update the VerificationState field of a finding.

    " + }, + "Confidence":{ + "shape":"RatioScale", + "documentation":"

    The rule action will update the Confidence field of a finding.

    " + }, + "Criticality":{ + "shape":"RatioScale", + "documentation":"

    The rule action will update the Criticality field of a finding.

    " + }, + "Types":{ + "shape":"TypeList", + "documentation":"

    The rule action will update the Types field of a finding.

    " + }, + "UserDefinedFields":{ + "shape":"FieldMap", + "documentation":"

    The rule action will update the UserDefinedFields field of a finding.

    " + }, + "Workflow":{"shape":"WorkflowUpdate"}, + "RelatedFindings":{ + "shape":"RelatedFindingList", + "documentation":"

    A list of findings that are related to a finding.

    " + } + }, + "documentation":"

    Identifies the finding fields that the automation rule action will update when a finding matches the defined criteria.

    " + }, + "AutomationRulesFindingFilters":{ + "type":"structure", + "members":{ + "ProductArn":{ + "shape":"StringFilterList", + "documentation":"

    The Amazon Resource Name (ARN) for a third-party product that generated a finding in Security Hub.

    " + }, + "AwsAccountId":{ + "shape":"StringFilterList", + "documentation":"

    The Amazon Web Services account ID in which a finding was generated.

    " + }, + "Id":{ + "shape":"StringFilterList", + "documentation":"

    The product-specific identifier for a finding.

    " + }, + "GeneratorId":{ + "shape":"StringFilterList", + "documentation":"

    The identifier for the solution-specific component that generated a finding.

    " + }, + "Type":{ + "shape":"StringFilterList", + "documentation":"

    One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see Types taxonomy for ASFF in the Security Hub User Guide.

    " + }, + "FirstObservedAt":{ + "shape":"DateFilterList", + "documentation":"

    A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "LastObservedAt":{ + "shape":"DateFilterList", + "documentation":"

    A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "CreatedAt":{ + "shape":"DateFilterList", + "documentation":"

    A timestamp that indicates when this finding record was created.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "UpdatedAt":{ + "shape":"DateFilterList", + "documentation":"

    A timestamp that indicates when the finding record was most recently updated.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "Confidence":{ + "shape":"NumberFilterList", + "documentation":"

    The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0–100 basis using a ratio scale. A value of 0 means 0 percent confidence, and a value of 100 means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see Confidence in the Security Hub User Guide.

    " + }, + "Criticality":{ + "shape":"NumberFilterList", + "documentation":"

    The level of importance that is assigned to the resources that are associated with a finding. Criticality is scored on a 0–100 basis, using a ratio scale that supports only full integers. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. For more information, see Criticality in the Security Hub User Guide.

    " + }, + "Title":{ + "shape":"StringFilterList", + "documentation":"

    A finding's title.

    " + }, + "Description":{ + "shape":"StringFilterList", + "documentation":"

    A finding's description.

    " + }, + "SourceUrl":{ + "shape":"StringFilterList", + "documentation":"

    Provides a URL that links to a page about the current finding in the finding product.

    " + }, + "ProductName":{ + "shape":"StringFilterList", + "documentation":"

    Provides the name of the product that generated the finding. For control-based findings, the product name is Security Hub.

    " + }, + "CompanyName":{ + "shape":"StringFilterList", + "documentation":"

    The name of the company for the product that generated the finding. For control-based findings, the company is Amazon Web Services.

    " + }, + "SeverityLabel":{ + "shape":"StringFilterList", + "documentation":"

    The severity value of the finding.

    " + }, + "ResourceType":{ + "shape":"StringFilterList", + "documentation":"

    The type of resource that the finding pertains to.

    " + }, + "ResourceId":{ + "shape":"StringFilterList", + "documentation":"

    The identifier for the given resource type. For Amazon Web Services resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, this is the identifier as defined by the Amazon Web Service that created the resource. For non-Amazon Web Services resources, this is a unique identifier that is associated with the resource.

    " + }, + "ResourcePartition":{ + "shape":"StringFilterList", + "documentation":"

    The partition in which the resource that the finding pertains to is located. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.

    " + }, + "ResourceRegion":{ + "shape":"StringFilterList", + "documentation":"

    The Amazon Web Services Region where the resource that a finding pertains to is located.

    " + }, + "ResourceTags":{ + "shape":"MapFilterList", + "documentation":"

    A list of Amazon Web Services tags associated with a resource at the time the finding was processed.

    " + }, + "ResourceDetailsOther":{ + "shape":"MapFilterList", + "documentation":"

    Custom fields and values about the resource that a finding pertains to.

    " + }, + "ComplianceStatus":{ + "shape":"StringFilterList", + "documentation":"

    The result of a security check. This field is only used for findings generated from controls.

    " + }, + "ComplianceSecurityControlId":{ + "shape":"StringFilterList", + "documentation":"

    The security control ID for which a finding was generated. Security control IDs are the same across standards.

    " + }, + "ComplianceAssociatedStandardsId":{ + "shape":"StringFilterList", + "documentation":"

    The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the DescribeStandards API response.

    " + }, + "VerificationState":{ + "shape":"StringFilterList", + "documentation":"

    Provides the veracity of a finding.

    " + }, + "WorkflowStatus":{ + "shape":"StringFilterList", + "documentation":"

    Provides information about the status of the investigation into a finding.

    " + }, + "RecordState":{ + "shape":"StringFilterList", + "documentation":"

    Provides the current state of a finding.

    " + }, + "RelatedFindingsProductArn":{ + "shape":"StringFilterList", + "documentation":"

    The ARN for the product that generated a related finding.

    " + }, + "RelatedFindingsId":{ + "shape":"StringFilterList", + "documentation":"

    The product-generated identifier for a related finding.

    " + }, + "NoteText":{ + "shape":"StringFilterList", + "documentation":"

    The text of a user-defined note that's added to a finding.

    " + }, + "NoteUpdatedAt":{ + "shape":"DateFilterList", + "documentation":"

    The timestamp of when the note was updated. Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "NoteUpdatedBy":{ + "shape":"StringFilterList", + "documentation":"

    The principal that created a note.

    " + }, + "UserDefinedFields":{ + "shape":"MapFilterList", + "documentation":"

    A list of user-defined name and value string pairs added to a finding.

    " + } + }, + "documentation":"

    The criteria that determine which findings a rule applies to.

    " + }, + "AutomationRulesMetadata":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Resource Name (ARN) for the rule.

    " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

    Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub will apply the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules.

    " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

    An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

    " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the rule.

    " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

    A description of the rule.

    " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding.
 The default value of this field is false.

    " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that indicates when the rule was created.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

    A timestamp that indicates when the rule was most recently updated.

    Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z.

    " + }, + "CreatedBy":{ + "shape":"NonEmptyString", + "documentation":"

    The principal that created a rule.

    " + } + }, + "documentation":"

    Metadata for automation rules in the calling account. The response includes rules with a RuleStatus of ENABLED and DISABLED.

    " + }, + "AutomationRulesMetadataList":{ + "type":"list", + "member":{"shape":"AutomationRulesMetadata"} + }, "AvailabilityZone":{ "type":"structure", "members":{ @@ -13737,6 +14132,29 @@ }, "documentation":"

    Information about the encryption configuration for X-Ray.

    " }, + "BatchDeleteAutomationRulesRequest":{ + "type":"structure", + "required":["AutomationRulesArns"], + "members":{ + "AutomationRulesArns":{ + "shape":"AutomationRulesArnsList", + "documentation":"

    A list of Amazon Resource Names (ARNs) for the rules that are to be deleted.

    " + } + } + }, + "BatchDeleteAutomationRulesResponse":{ + "type":"structure", + "members":{ + "ProcessedAutomationRules":{ + "shape":"AutomationRulesArnsList", + "documentation":"

    A list of properly processed rule ARNs.

    " + }, + "UnprocessedAutomationRules":{ + "shape":"UnprocessedAutomationRulesList", + "documentation":"

    A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't delete and why.

    " + } + } + }, "BatchDisableStandardsRequest":{ "type":"structure", "required":["StandardsSubscriptionArns"], @@ -13775,6 +14193,29 @@ } } }, + "BatchGetAutomationRulesRequest":{ + "type":"structure", + "required":["AutomationRulesArns"], + "members":{ + "AutomationRulesArns":{ + "shape":"AutomationRulesArnsList", + "documentation":"

    A list of rule ARNs to get details for.

    " + } + } + }, + "BatchGetAutomationRulesResponse":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"AutomationRulesConfigList", + "documentation":"

    A list of rule details for the provided rule ARNs.

    " + }, + "UnprocessedAutomationRules":{ + "shape":"UnprocessedAutomationRulesList", + "documentation":"

    A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't retrieve and why.

    " + } + } + }, "BatchGetSecurityControlsRequest":{ "type":"structure", "required":["SecurityControlIds"], @@ -13860,6 +14301,29 @@ } } }, + "BatchUpdateAutomationRulesRequest":{ + "type":"structure", + "required":["UpdateAutomationRulesRequestItems"], + "members":{ + "UpdateAutomationRulesRequestItems":{ + "shape":"UpdateAutomationRulesRequestItemsList", + "documentation":"

    An array of ARNs for the rules that are to be updated. Optionally, you can also include RuleStatus and RuleOrder.

    " + } + } + }, + "BatchUpdateAutomationRulesResponse":{ + "type":"structure", + "members":{ + "ProcessedAutomationRules":{ + "shape":"AutomationRulesArnsList", + "documentation":"

    A list of properly processed rule ARNs.

    " + }, + "UnprocessedAutomationRules":{ + "shape":"UnprocessedAutomationRulesList", + "documentation":"

    A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't update and why.

    " + } + } + }, "BatchUpdateFindingsRequest":{ "type":"structure", "required":["FindingIdentifiers"], @@ -14216,6 +14680,59 @@ } } }, + "CreateAutomationRuleRequest":{ + "type":"structure", + "required":[ + "RuleOrder", + "RuleName", + "Description", + "Criteria", + "Actions" + ], + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

    User-defined tags that help you label the purpose of a rule.

    " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

    Whether the rule is active after it is created. If this parameter is equal to Enabled, Security Hub will apply the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules.

    " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

    An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

    " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the rule.

    " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

    A description of the rule.

    " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding. The default value of this field is false.

    " + }, + "Criteria":{ + "shape":"AutomationRulesFindingFilters", + "documentation":"

    A set of ASFF finding field attributes and corresponding expected values that Security Hub uses to filter findings. If a finding matches the conditions specified in this parameter, Security Hub applies the rule action to the finding.

    " + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

    One or more actions to update finding fields if a finding matches the conditions specified in Criteria.

    " + } + } + }, + "CreateAutomationRuleResponse":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Resource Name (ARN) of the automation rule that you created.

    " + } + } + }, "CreateFindingAggregatorRequest":{ "type":"structure", "required":["RegionLinkingMode"], @@ -15680,6 +16197,36 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListAutomationRulesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A token to specify where to start paginating the response. This is the NextToken from a previously truncated response. On your first call to the ListAutomationRules API, set the value of this parameter to NULL.

    ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of rules to return in the response. This currently ranges from 1 to 100.

    ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListAutomationRulesResponse":{ + "type":"structure", + "members":{ + "AutomationRulesMetadata":{ + "shape":"AutomationRulesMetadataList", + "documentation":"

    Metadata for rules in the calling account. The response includes rules with a RuleStatus of ENABLED and DISABLED.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A pagination token for the response.

    " + } + } + }, "ListEnabledProductsForImportRequest":{ "type":"structure", "members":{ @@ -17530,6 +18077,18 @@ }, "documentation":"

    A list of port ranges.

    " }, + "RuleOrderValue":{ + "type":"integer", + "max":1000, + "min":1 + }, + "RuleStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "SecurityControl":{ "type":"structure", "required":[ @@ -18387,6 +18946,28 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "UnprocessedAutomationRule":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Resource Name (ARN) for the unprocessed automation rule.

    " + }, + "ErrorCode":{ + "shape":"Integer", + "documentation":"

    The error code associated with the unprocessed automation rule.

    " + }, + "ErrorMessage":{ + "shape":"NonEmptyString", + "documentation":"

    An error message describing why a request didn't process a specific rule.

    " + } + }, + "documentation":"

    A list of objects containing RuleArn, ErrorCode, and ErrorMessage. This parameter tells you which automation rules the request didn't process and why.

    " + }, + "UnprocessedAutomationRulesList":{ + "type":"list", + "member":{"shape":"UnprocessedAutomationRule"} + }, "UnprocessedErrorCode":{ "type":"string", "enum":[ @@ -18525,6 +19106,51 @@ "members":{ } }, + "UpdateAutomationRulesRequestItem":{ + "type":"structure", + "required":["RuleArn"], + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Resource Name (ARN) for the rule.

    " + }, + "RuleStatus":{ + "shape":"RuleStatus", + "documentation":"

    Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub will apply the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules.

    " + }, + "RuleOrder":{ + "shape":"RuleOrderValue", + "documentation":"

    An integer ranging from 1 to 1000 that represents the order in which the rule action is applied to findings. Security Hub applies rules with lower values for this parameter first.

    " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

    A description of the rule.

    " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the rule.

    " + }, + "IsTerminal":{ + "shape":"Boolean", + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and won't evaluate other rules for the finding.
 The default value of this field is false.

    " + }, + "Criteria":{ + "shape":"AutomationRulesFindingFilters", + "documentation":"

    A set of ASFF finding field attributes and corresponding expected values that Security Hub uses to filter findings. If a finding matches the conditions specified in this parameter, Security Hub applies the rule action to the finding.

    " + }, + "Actions":{ + "shape":"ActionList", + "documentation":"

    One or more actions to update finding fields if a finding matches the conditions specified in Criteria.

    " + } + }, + "documentation":"

    Specifies the parameters to update in an existing automation rule.

    " + }, + "UpdateAutomationRulesRequestItemsList":{ + "type":"list", + "member":{"shape":"UpdateAutomationRulesRequestItem"}, + "max":100, + "min":1 + }, "UpdateFindingAggregatorRequest":{ "type":"structure", "required":[ diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index dff3f249f007..e8f37f92a4ee 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index caf873be8399..da477b29a4ee 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 5d14fc7e029b..406f8a45eac5 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 0a4451c9146f..30cd702d7ddf 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 800ec4bbba1b..b49a57b8f788 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 
servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 52f2f629e974..75a7376cd44c 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index b4c4058484a6..10d2f0262ad0 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index 24ddebb08d80..2271e4be693a 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 09defdc57268..1a9ffa154337 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 21dcd3197b2d..9c5bd0b828c0 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/signer/pom.xml b/services/signer/pom.xml index d32c1d6b1844..b449709ce69f 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 47c55382be3e..133c45c38b2f 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json b/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json index d44d3bd03f2c..38383d6e5e0c 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/simspaceweaver/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -21,9 +21,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -34,9 +34,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -47,9 +47,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -60,9 +60,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -73,9 +73,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -86,9 +86,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -99,9 +99,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": 
true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -123,9 +123,9 @@ } }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -147,9 +147,9 @@ } }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -160,9 +160,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -173,9 +173,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -186,9 +186,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -199,9 +199,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -223,9 +223,9 @@ } }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -247,9 +247,9 @@ } }, "params": { - 
"Region": "us-isob-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -260,9 +260,9 @@ } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", - "UseFIPS": true, "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json b/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json index e3a9711634f1..7f94c800ddf9 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json +++ b/services/simspaceweaver/src/main/resources/codegen-resources/service-2.json @@ -368,7 +368,7 @@ "members":{ "Destination":{ "shape":"S3Destination", - "documentation":"

    The Amazon S3 bucket and optional folder (object key prefix) where SimSpace Weaver creates the snapshot file.

    " + "documentation":"

    The Amazon S3 bucket and optional folder (object key prefix) where SimSpace Weaver creates the snapshot file.

    The Amazon S3 bucket must be in the same Amazon Web Services Region as the simulation.

    " }, "Simulation":{ "shape":"SimSpaceWeaverResourceName", @@ -751,7 +751,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn:(?:aws|aws-cn):log-group:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:role\\/(.+)$" + "pattern":"^arn:(?:aws|aws-cn|aws-us-gov):log-group:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:role\\/(.+)$" }, "LoggingConfiguration":{ "type":"structure", @@ -806,7 +806,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn:(?:aws|aws-cn):iam::(\\d{12})?:role\\/(.+)$" + "pattern":"^arn:(?:aws|aws-cn|aws-us-gov):iam::(\\d{12})?:role\\/(.+)$" }, "S3Destination":{ "type":"structure", @@ -852,7 +852,7 @@ "type":"string", "max":1600, "min":0, - "pattern":"^arn:(?:aws|aws-cn):simspaceweaver:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:([a-z]+)\\/(.+)$" + "pattern":"^arn:(?:aws|aws-cn|aws-us-gov):simspaceweaver:([a-z]{2}-[a-z]+-\\d{1}):(\\d{12})?:([a-z]+)\\/(.+)$" }, "SimSpaceWeaverLongResourceName":{ "type":"string", @@ -1112,7 +1112,7 @@ }, "SnapshotS3Location":{ "shape":"S3Location", - "documentation":"

    The location of the snapshot .zip file in Amazon Simple Storage Service (Amazon S3). For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

    Provide a SnapshotS3Location to start your simulation from a snapshot.

    If you provide a SnapshotS3Location then you can't provide a SchemaS3Location.

    " + "documentation":"

    The location of the snapshot .zip file in Amazon Simple Storage Service (Amazon S3). For more information about Amazon S3, see the Amazon Simple Storage Service User Guide .

    Provide a SnapshotS3Location to start your simulation from a snapshot.

    The Amazon S3 bucket must be in the same Amazon Web Services Region as the simulation.

    If you provide a SnapshotS3Location then you can't provide a SchemaS3Location.

    " }, "Tags":{ "shape":"TagMap", diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 2e6f2bd06f38..9fdd153e33d7 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index a16a1dc7f905..46bcb90bf035 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index b5c60e62ca3e..da16763e1162 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 709446d0cf3d..fb795f0132e1 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index cf6a4c1eac56..98d2b82eabcd 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 23b282c1cae7..3c9395e1aa4d 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index da7a52a3ef20..ac7b1c4a3192 
100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index 915a1b404355..681526267856 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index 5e1918c83d3c..53a31457b5ff 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index 8e2430735219..ee1ff1426df3 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index c4d91b501139..565938b73772 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index bce8ab0c8d63..af26da4a057d 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index dc6fbf1554f6..01949fcb32e9 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 
2.20.90-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index de150a55fb30..c82af1da62b8 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/support/pom.xml b/services/support/pom.xml index 1a8ced9a9223..f33df48492cf 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index d49a484bd59c..b74f7955fd6a 100644 --- a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 09d11c3cbd78..c98a460880e9 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index 93497d475ee8..0bd15d39c677 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/textract/pom.xml b/services/textract/pom.xml index a6cb1f1aebd2..0d21f2fd6c5a 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 63276d428599..9e7d04bf3f99 
100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 0b5d29a21e72..daef0d388ba4 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index c80f86b1ee61..f5c748d699ad 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index baae27b866cf..0a898fd0ad09 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index 872f4dc7f388..07bd2021c6b6 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index 8cd71e21cbc3..4fcdc6e22875 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/translate/pom.xml b/services/translate/pom.xml index 8259154b0c64..34e45ec7b1a8 100644 --- a/services/translate/pom.xml 
+++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 translate diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml new file mode 100644 index 000000000000..de4763a30055 --- /dev/null +++ b/services/verifiedpermissions/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.90-SNAPSHOT + + verifiedpermissions + AWS Java SDK :: Services :: Verified Permissions + The AWS Java SDK for Verified Permissions module holds the client classes that are used for + communicating with Verified Permissions. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.verifiedpermissions + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..17b9ad0a91f5 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": 
"tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://verifiedpermissions.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..18725a06d462 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-gov-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-gov-east-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and 
DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "cn-north-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + 
"documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-iso-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-east-1.api.aws" + } + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-east-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "UseDualStack": true, + "UseFIPS": true, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-isob-east-1" 
+ } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://verifiedpermissions.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-isob-east-1" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/paginators-1.json b/services/verifiedpermissions/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..4314d715de41 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListIdentitySources": { + "input_token": 
"nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "identitySources" + }, + "ListPolicies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policies" + }, + "ListPolicyStores": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyStores" + }, + "ListPolicyTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyTemplates" + } + } +} diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..6e42825cf194 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2507 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-12-01", + "endpointPrefix":"verifiedpermissions", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"Amazon Verified Permissions", + "serviceId":"VerifiedPermissions", + "signatureVersion":"v4", + "signingName":"verifiedpermissions", + "targetPrefix":"VerifiedPermissions", + "uid":"verifiedpermissions-2021-12-01" + }, + "operations":{ + "CreateIdentitySource":{ + "name":"CreateIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateIdentitySourceInput"}, + "output":{"shape":"CreateIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

    After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    To reference a user from this identity source in your Cedar policies, use the following syntax.

    IdentityType::\"<CognitoUserPoolId>|<CognitoClientId>\"

    Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

    ", + "idempotent":true + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyInput"}, + "output":{"shape":"CreatePolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

    • To create a static policy, provide the Cedar policy text in the StaticPolicy section of the PolicyDefinition.

    • To create a policy that is dynamically linked to a policy template, specify the policy template ID and the principal and resource to associate with this policy in the templateLinked section of the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template.

    Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

    ", + "idempotent":true + }, + "CreatePolicyStore":{ + "name":"CreatePolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyStoreInput"}, + "output":{"shape":"CreatePolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a policy store. A policy store is a container for policy resources.

    ", + "idempotent":true + }, + "CreatePolicyTemplate":{ + "name":"CreatePolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePolicyTemplateInput"}, + "output":{"shape":"CreatePolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

    ", + "idempotent":true + }, + "DeleteIdentitySource":{ + "name":"DeleteIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIdentitySourceInput"}, + "output":{"shape":"DeleteIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes an identity source that references an identity provider (IdP) such as Amazon Cognito. After you delete the identity source, you can no longer use tokens for identities from that identity source to represent principals in authorization queries made using IsAuthorizedWithToken operations.

    ", + "idempotent":true + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyInput"}, + "output":{"shape":"DeletePolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the specified policy from the policy store.

    This operation is idempotent; if you specify a policy that doesn't exist, the request response returns a successful HTTP 200 status code.

    ", + "idempotent":true + }, + "DeletePolicyStore":{ + "name":"DeletePolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyStoreInput"}, + "output":{"shape":"DeletePolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the specified policy store.

    This operation is idempotent. If you specify a policy store that does not exist, the request response will still return a successful HTTP 200 status code.

    ", + "idempotent":true + }, + "DeletePolicyTemplate":{ + "name":"DeletePolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePolicyTemplateInput"}, + "output":{"shape":"DeletePolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the specified policy template from the policy store.

    This operation also deletes any policies that were created from the specified policy template. Those policies are immediately removed from all future API responses, and are asynchronously deleted from the policy store.

    ", + "idempotent":true + }, + "GetIdentitySource":{ + "name":"GetIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentitySourceInput"}, + "output":{"shape":"GetIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves the details about the specified identity source.

    " + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyInput"}, + "output":{"shape":"GetPolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves information about the specified policy.

    " + }, + "GetPolicyStore":{ + "name":"GetPolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyStoreInput"}, + "output":{"shape":"GetPolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves details about a policy store.

    " + }, + "GetPolicyTemplate":{ + "name":"GetPolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPolicyTemplateInput"}, + "output":{"shape":"GetPolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieve the details for the specified policy template in the specified policy store.

    " + }, + "GetSchema":{ + "name":"GetSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSchemaInput"}, + "output":{"shape":"GetSchemaOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieve the details for the specified schema in the specified policy store.

    " + }, + "IsAuthorized":{ + "name":"IsAuthorized", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IsAuthorizedInput"}, + "output":{"shape":"IsAuthorizedOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Makes an authorization decision about a service request described in the parameters. The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

    " + }, + "IsAuthorizedWithToken":{ + "name":"IsAuthorizedWithToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"IsAuthorizedWithTokenInput"}, + "output":{"shape":"IsAuthorizedWithTokenOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source. The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    " + }, + "ListIdentitySources":{ + "name":"ListIdentitySources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListIdentitySourcesInput"}, + "output":{"shape":"ListIdentitySourcesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a paginated list of all of the identity sources defined in the specified policy store.

    " + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPoliciesInput"}, + "output":{"shape":"ListPoliciesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a paginated list of all policies stored in the specified policy store.

    " + }, + "ListPolicyStores":{ + "name":"ListPolicyStores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyStoresInput"}, + "output":{"shape":"ListPolicyStoresOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a paginated list of all policy stores in the calling Amazon Web Services account.

    " + }, + "ListPolicyTemplates":{ + "name":"ListPolicyTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPolicyTemplatesInput"}, + "output":{"shape":"ListPolicyTemplatesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Returns a paginated list of all policy templates in the specified policy store.

    " + }, + "PutSchema":{ + "name":"PutSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutSchemaInput"}, + "output":{"shape":"PutSchemaOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

    ", + "idempotent":true + }, + "UpdateIdentitySource":{ + "name":"UpdateIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateIdentitySourceInput"}, + "output":{"shape":"UpdateIdentitySourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

    ", + "idempotent":true + }, + "UpdatePolicy":{ + "name":"UpdatePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePolicyInput"}, + "output":{"shape":"UpdatePolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

    If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

    ", + "idempotent":true + }, + "UpdatePolicyStore":{ + "name":"UpdatePolicyStore", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePolicyStoreInput"}, + "output":{"shape":"UpdatePolicyStoreOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Modifies the validation setting for a policy store.

    ", + "idempotent":true + }, + "UpdatePolicyTemplate":{ + "name":"UpdatePolicyTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePolicyTemplateInput"}, + "output":{"shape":"UpdatePolicyTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the specified policy template. You can update only the description and some elements of the policyBody.

    Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

    ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You don't have sufficient access to perform this action.

    ", + "exception":true + }, + "ActionId":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "ActionIdentifier":{ + "type":"structure", + "required":[ + "actionType", + "actionId" + ], + "members":{ + "actionType":{ + "shape":"ActionType", + "documentation":"

    The type of an action.

    " + }, + "actionId":{ + "shape":"ActionId", + "documentation":"

    The ID of an action.

    " + } + }, + "documentation":"

    Contains information about an action for a request for which an authorization decision is made.

    This data type is used as a request parameter to the IsAuthorized and IsAuthorizedWithToken operations.

    Example: { \"actionId\": \"<action name>\", \"actionType\": \"Action\" }

    " + }, + "ActionType":{ + "type":"string", + "max":200, + "min":1, + "pattern":"Action$|^.+::Action" + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "boolean":{ + "shape":"BooleanAttribute", + "documentation":"

    An attribute value of Boolean type.

    Example: {\"boolean\": true}

    " + }, + "entityIdentifier":{ + "shape":"EntityIdentifier", + "documentation":"

    An attribute value of type EntityIdentifier.

    Example: \"entityIdentifier\": { \"entityId\": \"<id>\", \"entityType\": \"<entity type>\"}

    " + }, + "long":{ + "shape":"LongAttribute", + "documentation":"

    An attribute value of Long type.

    Example: {\"long\": 0}

    " + }, + "string":{ + "shape":"StringAttribute", + "documentation":"

    An attribute value of String type.

    Example: {\"string\": \"abc\"}

    " + }, + "set":{ + "shape":"SetAttribute", + "documentation":"

    An attribute value of Set type.

    Example: {\"set\": [ {} ] }

    " + }, + "record":{ + "shape":"RecordAttribute", + "documentation":"

    An attribute value of Record type.

    Example: {\"record\": { \"keyName\": {} } }

    " + } + }, + "documentation":"

    The value of an attribute.

    Contains information about the runtime context for a request for which an authorization decision is made.

    This data type is used as a member of the ContextDefinition structure which is used as a request parameter for the IsAuthorized and IsAuthorizedWithToken operations.

    ", + "union":true + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BooleanAttribute":{ + "type":"boolean", + "box":true + }, + "ClientId":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "ClientIds":{ + "type":"list", + "member":{"shape":"ClientId"}, + "max":1000, + "min":0 + }, + "CognitoUserPoolConfiguration":{ + "type":"structure", + "required":["userPoolArn"], + "members":{ + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Cognito user pool that contains the identities to be authorized.

    Example: \"UserPoolArn\": \"cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\"

    " + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

    The unique application client IDs that are associated with the specified Amazon Cognito user pool.

    Example: \"ClientIds\": [\"&ExampleCogClientId;\"]

    " + } + }, + "documentation":"

    The configuration for an identity source that represents a connection to an Amazon Cognito user pool used as an identity provider for Verified Permissions.

    This data type is used as a field that is part of a Configuration structure that is used as a parameter to the Configuration.

    Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}

    " + }, + "Configuration":{ + "type":"structure", + "members":{ + "cognitoUserPoolConfiguration":{ + "shape":"CognitoUserPoolConfiguration", + "documentation":"

    Contains configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of an Amazon Cognito user pool and one or more application client IDs.

    Example: \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\": [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}}

    " + } + }, + "documentation":"

    Contains configuration information used when creating a new identity source.

    At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

    You must specify a userPoolArn, and optionally, a ClientId.

    This data type is used as a request parameter for the CreateIdentitySource operation.

    ", + "union":true + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resources" + ], + "members":{ + "message":{"shape":"String"}, + "resources":{ + "shape":"ResourceConflictList", + "documentation":"

    The list of resources referenced with this failed request.

    " + } + }, + "documentation":"

    The request failed because another request to modify a resource occurred at the same time.

    ", + "exception":true + }, + "ContextDefinition":{ + "type":"structure", + "members":{ + "contextMap":{ + "shape":"ContextMap", + "documentation":"

    A list of attributes that are needed to successfully evaluate an authorization request. Each attribute in this array must include a map of a data type and its value.

    Example: \"Context\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}

    " + } + }, + "documentation":"

    Contains additional details about the context of the request. Verified Permissions evaluates this information in an authorization request as part of the when and unless clauses in a policy.

    This data type is used as a request parameter for the IsAuthorized and IsAuthorizedWithToken operations.

    Example: \"context\":{\"Context\":{\"<KeyName1>\":{\"boolean\":true},\"<KeyName2>\":{\"long\":1234}}}

    ", + "union":true + }, + "ContextMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"AttributeValue"}, + "min":0 + }, + "CreateIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "configuration" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

    If you don't provide this value, then Amazon Web Services generates a random one for you.

    If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

    ", + "idempotencyToken":true + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store in which you want to store this identity source. Only policies and requests made using this policy store can reference identities from the identity provider configured in the new identity source.

    " + }, + "configuration":{ + "shape":"Configuration", + "documentation":"

    Specifies the details required to communicate with the identity provider (IdP) associated with this identity source.

    At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

    You must specify a UserPoolArn, and optionally, a ClientId.

    " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

    Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source.

    " + } + } + }, + "CreateIdentitySourceOutput":{ + "type":"structure", + "required":[ + "createdDate", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the identity source was originally created.

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    The unique ID of the new identity source.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the identity source was most recently updated.

    " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the identity source.

    " + } + } + }, + "CreatePolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "definition" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

    If you don't provide this value, then Amazon Web Services generates a random one for you.

    If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

    ", + "idempotencyToken":true + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the PolicyStoreId of the policy store you want to store the policy in.

    " + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

    A structure that specifies the policy type and content to use for the new policy. You must include either a static or a templateLinked element. The policy content must be written in the Cedar policy language.

    " + } + } + }, + "CreatePolicyOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the new policy.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    The unique ID of the new policy.

    " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

    The policy type of the new policy.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal specified in the new policy's scope. This response element isn't present when principal isn't specified in the policy content.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource specified in the new policy's scope. This response element isn't present when the resource isn't specified in the policy content.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy was last updated.

    " + } + } + }, + "CreatePolicyStoreInput":{ + "type":"structure", + "required":["validationSettings"], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

    If you don't provide this value, then Amazon Web Services generates a random one for you.

    If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

    ", + "idempotencyToken":true + }, + "validationSettings":{ + "shape":"ValidationSettings", + "documentation":"

    Specifies the validation setting for this policy store.

    Currently, the only valid and required value is Mode.

    We recommend that you turn on STRICT mode only after you define a schema. If a schema doesn't exist, then STRICT mode causes any policy to fail validation, and Verified Permissions rejects the policy. You can turn off validation by using the UpdatePolicyStore. Then, when you have a schema defined, use UpdatePolicyStore again to turn validation back on.

    " + } + } + }, + "CreatePolicyStoreOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The unique ID of the new policy store.

    " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the new policy store.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy store was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy store was last updated.

    " + } + } + }, + "CreatePolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "statement" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.

    If you don't provide this value, then Amazon Web Services generates a random one for you.

    If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error.

    ", + "idempotencyToken":true + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store in which to create the policy template.

    " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

    Specifies a description for the policy template.

    " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

    Specifies the content that you want to use for the new policy template, written in the Cedar policy language.

    " + } + } + }, + "CreatePolicyTemplateOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the policy template.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The unique ID of the new policy template.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy template was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy template was most recently updated.

    " + } + } + }, + "Decision":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, + "DeleteIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "identitySourceId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the identity source that you want to delete.

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    Specifies the ID of the identity source that you want to delete.

    " + } + } + }, + "DeleteIdentitySourceOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy that you want to delete.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    Specifies the ID of the policy that you want to delete.

    " + } + } + }, + "DeletePolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePolicyStoreInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that you want to delete.

    " + } + } + }, + "DeletePolicyStoreOutput":{ + "type":"structure", + "members":{ + } + }, + "DeletePolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy template that you want to delete.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    Specifies the ID of the policy template that you want to delete.

    " + } + } + }, + "DeletePolicyTemplateOutput":{ + "type":"structure", + "members":{ + } + }, + "DeterminingPolicyItem":{ + "type":"structure", + "required":["policyId"], + "members":{ + "policyId":{ + "shape":"PolicyId", + "documentation":"

    The ID of a policy that determined an authorization decision.

    Example: \"policyId\":\"SPEXAMPLEabcdefg111111\"

    " + } + }, + "documentation":"

    Contains information about one of the policies that determined an authorization decision.

    This data type is used as an element in a response parameter for the IsAuthorized and IsAuthorizedWithToken operations.

    Example: \"determiningPolicies\":[{\"policyId\":\"SPEXAMPLEabcdefg111111\"}]

    " + }, + "DeterminingPolicyList":{ + "type":"list", + "member":{"shape":"DeterminingPolicyItem"} + }, + "DiscoveryUrl":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"https://.*" + }, + "EntitiesDefinition":{ + "type":"structure", + "members":{ + "entityList":{ + "shape":"EntityList", + "documentation":"

    An array of entities that are needed to successfully evaluate an authorization request. Each entity in this array must include an identifier for the entity, the attributes of the entity, and a list of any parent entities.

    " + } + }, + "documentation":"

    Contains the list of entities to be considered during an authorization request. This includes all principals, resources, and actions required to successfully evaluate the request.

    This data type is used as a field in the response parameter for the IsAuthorized and IsAuthorizedWithToken operations.

    ", + "union":true + }, + "EntityAttributes":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"AttributeValue"}, + "min":0 + }, + "EntityId":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "EntityIdentifier":{ + "type":"structure", + "required":[ + "entityType", + "entityId" + ], + "members":{ + "entityType":{ + "shape":"EntityType", + "documentation":"

    The type of an entity.

    Example: \"entityType\":\"typeName\"

    " + }, + "entityId":{ + "shape":"EntityId", + "documentation":"

    The identifier of an entity.

    \"entityId\":\"identifier\"

    " + } + }, + "documentation":"

    Contains the identifier of an entity, including its ID and type.

    This data type is used as a request parameter for IsAuthorized operation, and as a response parameter for the CreatePolicy, GetPolicy, and UpdatePolicy operations.

    Example: {\"entityId\":\"string\",\"entityType\":\"string\"}

    " + }, + "EntityItem":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"EntityIdentifier", + "documentation":"

    The identifier of the entity.

    " + }, + "attributes":{ + "shape":"EntityAttributes", + "documentation":"

    A list of attributes for the entity.

    " + }, + "parents":{ + "shape":"ParentList", + "documentation":"

    The parents in the hierarchy that contains the entity.

    " + } + }, + "documentation":"

    Contains information about an entity that can be referenced in a Cedar policy.

    This data type is used as one of the fields in the EntitiesDefinition structure.

    { \"id\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"Attributes\": {}, \"Parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

    " + }, + "EntityList":{ + "type":"list", + "member":{"shape":"EntityItem"}, + "min":0 + }, + "EntityReference":{ + "type":"structure", + "members":{ + "unspecified":{ + "shape":"Boolean", + "documentation":"

    Used to indicate that a principal or resource is not specified. This can be used to search for policies that are not associated with a specific principal or resource.

    " + }, + "identifier":{ + "shape":"EntityIdentifier", + "documentation":"

    The identifier of the entity. It can consist of either an EntityType and EntityId, a principal, or a resource.

    " + } + }, + "documentation":"

    Contains information about a principal or resource that can be referenced in a Cedar policy.

    This data type is used as part of the PolicyFilter structure that is used as a request parameter for the ListPolicies operation.

    ", + "union":true + }, + "EntityType":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "EvaluationErrorItem":{ + "type":"structure", + "required":["errorDescription"], + "members":{ + "errorDescription":{ + "shape":"String", + "documentation":"

    The error description.

    " + } + }, + "documentation":"

    Contains a description of an evaluation error.

    This data type is used as a request parameter in the IsAuthorized and IsAuthorizedWithToken operations.

    " + }, + "EvaluationErrorList":{ + "type":"list", + "member":{"shape":"EvaluationErrorItem"} + }, + "GetIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "identitySourceId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the identity source you want information about.

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    Specifies the ID of the identity source you want information about.

    " + } + } + }, + "GetIdentitySourceOutput":{ + "type":"structure", + "required":[ + "createdDate", + "details", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId", + "principalEntityType" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the identity source was originally created.

    " + }, + "details":{ + "shape":"IdentitySourceDetails", + "documentation":"

    A structure that describes the configuration of the identity source.

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    The ID of the identity source.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the identity source was most recently updated.

    " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the identity source.

    " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

    The data type of principals generated for identities authenticated by this identity source.

    " + } + } + }, + "GetPolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy that you want information about.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    Specifies the ID of the policy you want information about.

    " + } + } + }, + "GetPolicyOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "definition", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the policy that you want information about.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    The unique ID of the policy that you want information about.

    " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

    The type of the policy.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal specified in the policy's scope. This element isn't included in the response when Principal isn't present in the policy content.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource specified in the policy's scope. This element isn't included in the response when Resource isn't present in the policy content.

    " + }, + "definition":{ + "shape":"PolicyDefinitionDetail", + "documentation":"

    The definition of the requested policy.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy was last updated.

    " + } + } + }, + "GetPolicyStoreInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that you want information about.

    " + } + } + }, + "GetPolicyStoreOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "validationSettings", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store.

    " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the policy store.

    " + }, + "validationSettings":{ + "shape":"ValidationSettings", + "documentation":"

    The current validation settings for the policy store.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy store was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy store was last updated.

    " + } + } + }, + "GetPolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy template that you want information about.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    Specifies the ID of the policy template that you want information about.

    " + } + } + }, + "GetPolicyTemplateOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "statement", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the policy template.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The ID of the policy template.

    " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

    The description of the policy template.

    " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

    The content of the body of the policy template written in the Cedar policy language.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy template was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy template was most recently updated.

    " + } + } + }, + "GetSchemaInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the schema.

    " + } + } + }, + "GetSchemaOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "schema", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the schema.

    " + }, + "schema":{ + "shape":"SchemaJson", + "documentation":"

    The body of the schema, written in Cedar schema JSON.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the schema was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the schema was most recently updated.

    " + } + } + }, + "IdempotencyToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "IdentitySourceDetails":{ + "type":"structure", + "members":{ + "clientIds":{ + "shape":"ClientIds", + "documentation":"

    The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.

    " + }, + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store.

    " + }, + "discoveryUrl":{ + "shape":"DiscoveryUrl", + "documentation":"

    The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the Amazon Web Services Region and the user pool identifier with those appropriate for this user pool.

    https://cognito-idp.<region>.amazonaws.com/<user-pool-id>/.well-known/openid-configuration

    " + }, + "openIdIssuer":{ + "shape":"OpenIdIssuer", + "documentation":"

    A string that identifies the type of OIDC service represented by this identity source.

    At this time, the only valid value is cognito.

    " + } + }, + "documentation":"

    A structure that contains configuration of the identity source.

    This data type is used as a response parameter for the CreateIdentitySource operation.

    " + }, + "IdentitySourceFilter":{ + "type":"structure", + "members":{ + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

    The Cedar entity type of the principals returned by the identity provider (IdP) associated with this identity source.

    " + } + }, + "documentation":"

    A structure that defines characteristics of an identity source that you can use to filter.

    This data type is used as a request parameter for the ListIdentityStores operation.

    " + }, + "IdentitySourceFilters":{ + "type":"list", + "member":{"shape":"IdentitySourceFilter"}, + "max":10, + "min":0 + }, + "IdentitySourceId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "IdentitySourceItem":{ + "type":"structure", + "required":[ + "createdDate", + "details", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId", + "principalEntityType" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the identity source was originally created.

    " + }, + "details":{ + "shape":"IdentitySourceItemDetails", + "documentation":"

    A structure that contains the details of the associated identity provider (IdP).

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    The unique identifier of the identity source.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the identity source was most recently updated.

    " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The identifier of the policy store that contains the identity source.

    " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

    The Cedar entity type of the principals returned from the IdP associated with this identity source.

    " + } + }, + "documentation":"

    A structure that defines an identity source.

    This data type is used as a request parameter for the ListIdentityStores operation.

    " + }, + "IdentitySourceItemDetails":{ + "type":"structure", + "members":{ + "clientIds":{ + "shape":"ClientIds", + "documentation":"

    The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.

    " + }, + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

    The Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store.

    " + }, + "discoveryUrl":{ + "shape":"DiscoveryUrl", + "documentation":"

    The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the Amazon Web Services Region and the user pool identifier with those appropriate for this user pool.

    https://cognito-idp.<region>.amazonaws.com/<user-pool-id>/.well-known/openid-configuration

    " + }, + "openIdIssuer":{ + "shape":"OpenIdIssuer", + "documentation":"

    A string that identifies the type of OIDC service represented by this identity source.

    At this time, the only valid value is cognito.

    " + } + }, + "documentation":"

    A structure that contains configuration of the identity source.

    This data type is used as a response parameter for the CreateIdentitySource operation.

    " + }, + "IdentitySources":{ + "type":"list", + "member":{"shape":"IdentitySourceItem"} + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request failed because of an internal error. Try your request again later.

    ", + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "IsAuthorizedInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store. Policies in this policy store will be used to make an authorization decision for the input.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    Specifies the principal for which the authorization decision is to be made.

    " + }, + "action":{ + "shape":"ActionIdentifier", + "documentation":"

    Specifies the requested action to be authorized. For example, is the principal authorized to perform this action on the resource?

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    Specifies the resource for which the authorization decision is to be made.

    " + }, + "context":{ + "shape":"ContextDefinition", + "documentation":"

    Specifies additional context that can be used to make more granular authorization decisions.

    " + }, + "entities":{ + "shape":"EntitiesDefinition", + "documentation":"

    Specifies the list of entities and their associated attributes that Verified Permissions can examine when evaluating the policies.

    " + } + } + }, + "IsAuthorizedOutput":{ + "type":"structure", + "required":[ + "decision", + "determiningPolicies", + "errors" + ], + "members":{ + "decision":{ + "shape":"Decision", + "documentation":"

    An authorization decision that indicates if the authorization request should be allowed or denied.

    " + }, + "determiningPolicies":{ + "shape":"DeterminingPolicyList", + "documentation":"

    The list of determining policies used to make the authorization decision. For example, if there are two matching policies, where one is a forbid and the other is a permit, then the forbid policy will be the determining policy. In the case of multiple matching permit policies then there would be multiple determining policies. In the case that no policies match, and hence the response is DENY, there would be no determining policies.

    " + }, + "errors":{ + "shape":"EvaluationErrorList", + "documentation":"

    Errors that occurred while making an authorization decision, for example, a policy references an entity or entity attribute that does not exist in the slice.

    " + } + } + }, + "IsAuthorizedWithTokenInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store. Policies in this policy store will be used to make an authorization decision for the input.

    " + }, + "identityToken":{ + "shape":"Token", + "documentation":"

    Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

    " + }, + "accessToken":{ + "shape":"Token", + "documentation":"

    Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

    " + }, + "action":{ + "shape":"ActionIdentifier", + "documentation":"

    Specifies the requested action to be authorized. Is the specified principal authorized to perform this action on the specified resource?

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    Specifies the resource for which the authorization decision is made. For example, is the principal allowed to perform the action on the resource?

    " + }, + "context":{ + "shape":"ContextDefinition", + "documentation":"

    Specifies additional context that can be used to make more granular authorization decisions.

    " + }, + "entities":{ + "shape":"EntitiesDefinition", + "documentation":"

    Specifies the list of entities and their associated attributes that Verified Permissions can examine when evaluating the policies.

    " + } + } + }, + "IsAuthorizedWithTokenOutput":{ + "type":"structure", + "required":[ + "decision", + "determiningPolicies", + "errors" + ], + "members":{ + "decision":{ + "shape":"Decision", + "documentation":"

    An authorization decision that indicates if the authorization request should be allowed or denied.

    " + }, + "determiningPolicies":{ + "shape":"DeterminingPolicyList", + "documentation":"

    The list of determining policies used to make the authorization decision. For example, if there are multiple matching policies, where at least one is a forbid policy, then because forbid always overrides permit the forbid policies are the determining policies. If all matching policies are permit policies, then those policies are the determining policies. When no policies match and the response is the default DENY, there are no determining policies.

    " + }, + "errors":{ + "shape":"EvaluationErrorList", + "documentation":"

    Errors that occurred while making an authorization decision. For example, a policy references an entity or entity attribute that does not exist in the slice.

    " + } + } + }, + "ListIdentitySourcesInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the identity sources that you want to list.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

    " + }, + "maxResults":{ + "shape":"ListIdentitySourcesMaxResults", + "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "filters":{ + "shape":"IdentitySourceFilters", + "documentation":"

    Specifies characteristics of an identity source that you can use to limit the output to matching identity sources.

    " + } + } + }, + "ListIdentitySourcesMaxResults":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "ListIdentitySourcesOutput":{ + "type":"structure", + "required":["identitySources"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

    " + }, + "identitySources":{ + "shape":"IdentitySources", + "documentation":"

    The list of identity sources stored in the specified policy store.

    " + } + } + }, + "ListPoliciesInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store you want to list policies from.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "filter":{ + "shape":"PolicyFilter", + "documentation":"

    Specifies a filter that limits the response to only policies that match the specified criteria. For example, you list only the policies that reference a specified principal.

    " + } + } + }, + "ListPoliciesOutput":{ + "type":"structure", + "required":["policies"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

    " + }, + "policies":{ + "shape":"PolicyList", + "documentation":"

    Lists all policies that are available in the specified policy store.

    " + } + } + }, + "ListPolicyStoresInput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + } + } + }, + "ListPolicyStoresOutput":{ + "type":"structure", + "required":["policyStores"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

    " + }, + "policyStores":{ + "shape":"PolicyStoreList", + "documentation":"

    The list of policy stores in the account.

    " + } + } + }, + "ListPolicyTemplatesInput":{ + "type":"structure", + "required":["policyStoreId"], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy templates you want to list.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + } + } + }, + "ListPolicyTemplatesOutput":{ + "type":"structure", + "required":["policyTemplates"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    If present, this value indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. This indicates that this is the last page of results.

    " + }, + "policyTemplates":{ + "shape":"PolicyTemplatesList", + "documentation":"

    The list of the policy templates in the specified policy store.

    " + } + } + }, + "LongAttribute":{ + "type":"long", + "box":true + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "Namespace":{ + "type":"string", + "max":100, + "min":1, + "pattern":".*" + }, + "NamespaceList":{ + "type":"list", + "member":{"shape":"Namespace"} + }, + "NextToken":{ + "type":"string", + "max":8000, + "min":1, + "pattern":"[A-Za-z0-9-_=+/\\.]*" + }, + "OpenIdIssuer":{ + "type":"string", + "enum":["COGNITO"] + }, + "ParentList":{ + "type":"list", + "member":{"shape":"EntityIdentifier"}, + "max":100, + "min":0 + }, + "PolicyDefinition":{ + "type":"structure", + "members":{ + "static":{ + "shape":"StaticPolicyDefinition", + "documentation":"

    A structure that describes a static policy. A static policy doesn't use a template or allow placeholders for entities.

    " + }, + "templateLinked":{ + "shape":"TemplateLinkedPolicyDefinition", + "documentation":"

    A structure that describes a policy that was instantiated from a template. The template can specify placeholders for principal and resource. When you use CreatePolicy to create a policy from a template, you specify the exact principal and resource to use for the instantiated policy.

    " + } + }, + "documentation":"

    A structure that contains the details for a Cedar policy definition. It includes the policy type, a description, and a policy body. This is a top level data type used to create a policy.

    This data type is used as a request parameter for the CreatePolicy operation. This structure must always have either a static or a templateLinked element.

    ", + "union":true + }, + "PolicyDefinitionDetail":{ + "type":"structure", + "members":{ + "static":{ + "shape":"StaticPolicyDefinitionDetail", + "documentation":"

    Information about a static policy that wasn't created with a policy template.

    " + }, + "templateLinked":{ + "shape":"TemplateLinkedPolicyDefinitionDetail", + "documentation":"

    Information about a template-linked policy that was created by instantiating a policy template.

    " + } + }, + "documentation":"

    A structure that describes a policy definition. It must always have either a static or a templateLinked element.

    This data type is used as a response parameter for the GetPolicy operation.

    ", + "union":true + }, + "PolicyDefinitionItem":{ + "type":"structure", + "members":{ + "static":{ + "shape":"StaticPolicyDefinitionItem", + "documentation":"

    Information about a static policy that wasn't created with a policy template.

    " + }, + "templateLinked":{ + "shape":"TemplateLinkedPolicyDefinitionItem", + "documentation":"

    Information about a template-linked policy that was created by instantiating a policy template.

    " + } + }, + "documentation":"

    A structure that describes a PolicyDefinition. It will always have either a StaticPolicy or a TemplateLinkedPolicy element.

    This data type is used as a response parameter for the CreatePolicy and ListPolicies operations.

    ", + "union":true + }, + "PolicyFilter":{ + "type":"structure", + "members":{ + "principal":{ + "shape":"EntityReference", + "documentation":"

    Filters the output to only policies that reference the specified principal.

    " + }, + "resource":{ + "shape":"EntityReference", + "documentation":"

    Filters the output to only policies that reference the specified resource.

    " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

    Filters the output to only policies of the specified type.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    Filters the output to only template-linked policies that were instantiated from the specified policy template.

    " + } + }, + "documentation":"

    Contains information about a filter to refine policies returned in a query.

    This data type is used as a response parameter for the ListPolicies operation.

    " + }, + "PolicyId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "PolicyItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "definition", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The identifier of the PolicyStore where the policy you want information about is stored.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    The identifier of the policy you want information about.

    " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

    The type of the policy. This is one of the following values:

    • static

    • templateLinked

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal associated with the policy.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource associated with the policy.

    " + }, + "definition":{ + "shape":"PolicyDefinitionItem", + "documentation":"

    The policy definition of an item in the list of policies returned.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy was created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy was most recently updated.

    " + } + }, + "documentation":"

    Contains information about a policy.

    This data type is used as a response parameter for the ListPolicies operation.

    " + }, + "PolicyList":{ + "type":"list", + "member":{"shape":"PolicyItem"} + }, + "PolicyStatement":{ + "type":"string", + "max":10000, + "min":1 + }, + "PolicyStoreId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "PolicyStoreItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "createdDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The unique identifier of the policy store.

    " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the policy store.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time the policy was created.

    " + } + }, + "documentation":"

    Contains information about a policy store.

    This data type is used as a response parameter for the ListPolicyStores operation.

    " + }, + "PolicyStoreList":{ + "type":"list", + "member":{"shape":"PolicyStoreItem"} + }, + "PolicyTemplateDescription":{ + "type":"string", + "max":150, + "min":0 + }, + "PolicyTemplateId":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9-]*" + }, + "PolicyTemplateItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The unique identifier of the policy store that contains the template.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The unique identifier of the policy template.

    " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

    The description attached to the policy template.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy template was created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy template was most recently updated.

    " + } + }, + "documentation":"

    Contains details about a policy template.

    This data type is used as a response parameter for the ListPolicyTemplates operation.

    " + }, + "PolicyTemplatesList":{ + "type":"list", + "member":{"shape":"PolicyTemplateItem"} + }, + "PolicyType":{ + "type":"string", + "enum":[ + "STATIC", + "TEMPLATE_LINKED" + ] + }, + "PrincipalEntityType":{ + "type":"string", + "max":200, + "min":1, + "pattern":".*" + }, + "PutSchemaInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "definition" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store in which to place the schema.

    " + }, + "definition":{ + "shape":"SchemaDefinition", + "documentation":"

    Specifies the definition of the schema to be stored. The schema definition must be written in Cedar schema JSON.

    " + } + } + }, + "PutSchemaOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "namespaces", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The unique ID of the policy store that contains the schema.

    " + }, + "namespaces":{ + "shape":"NamespaceList", + "documentation":"

    Identifies the namespaces of the entities referenced by this schema.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the schema was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the schema was last updated.

    " + } + } + }, + "RecordAttribute":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"AttributeValue"} + }, + "ResourceArn":{ + "type":"string", + "max":2500, + "min":1, + "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:.*" + }, + "ResourceConflict":{ + "type":"structure", + "required":[ + "resourceId", + "resourceType" + ], + "members":{ + "resourceId":{ + "shape":"String", + "documentation":"

    The unique identifier of the resource involved in a conflict.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The type of the resource involved in a conflict.

    " + } + }, + "documentation":"

    Contains information about a resource conflict.

    " + }, + "ResourceConflictList":{ + "type":"list", + "member":{"shape":"ResourceConflict"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The unique ID of the resource referenced in the failed request.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The resource type of the resource referenced in the failed request.

    " + } + }, + "documentation":"

    The request failed because it references a resource that doesn't exist.

    ", + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "IDENTITY_SOURCE", + "POLICY_STORE", + "POLICY", + "POLICY_TEMPLATE", + "SCHEMA" + ] + }, + "SchemaDefinition":{ + "type":"structure", + "members":{ + "cedarJson":{ + "shape":"SchemaJson", + "documentation":"

    A JSON string representation of the schema supported by applications that use this policy store. For more information, see Policy store schema in the Amazon Verified Permissions User Guide.

    " + } + }, + "documentation":"

    Contains a list of principal types, resource types, and actions that can be specified in policies stored in the same policy store. If the validation mode for the policy store is set to STRICT, then policies that can't be validated by this schema are rejected by Verified Permissions and can't be stored in the policy store.

    ", + "union":true + }, + "SchemaJson":{ + "type":"string", + "max":10000, + "min":1 + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The unique ID of the resource referenced in the failed request.

    " + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

    The resource type of the resource referenced in the failed request.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    The code for the Amazon Web Service that owns the quota.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    The quota code recognized by the Amazon Web Services Service Quotas service.

    " + } + }, + "documentation":"

    The request failed because it would cause a service quota to be exceeded.

    ", + "exception":true + }, + "SetAttribute":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "StaticPolicyDefinition":{ + "type":"structure", + "required":["statement"], + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

    The description of the static policy.

    " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

    The policy content of the static policy, written in the Cedar policy language.

    " + } + }, + "documentation":"

    Contains information about a static policy.

    This data type is used as a field that is part of the PolicyDefinitionDetail type.

    " + }, + "StaticPolicyDefinitionDetail":{ + "type":"structure", + "required":["statement"], + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

    A description of the static policy.

    " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

    The content of the static policy written in the Cedar policy language.

    " + } + }, + "documentation":"

    A structure that contains details about a static policy. It includes the description and policy body.

    This data type is used within a PolicyDefinition structure as part of a request parameter for the CreatePolicy operation.

    " + }, + "StaticPolicyDefinitionItem":{ + "type":"structure", + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

    A description of the static policy.

    " + } + }, + "documentation":"

    A structure that contains details about a static policy. It includes the description and policy statement.

    This data type is used within a PolicyDefinition structure as part of a request parameter for the CreatePolicy operation.

    " + }, + "StaticPolicyDescription":{ + "type":"string", + "max":150, + "min":0 + }, + "String":{"type":"string"}, + "StringAttribute":{"type":"string"}, + "TemplateLinkedPolicyDefinition":{ + "type":"structure", + "required":["policyTemplateId"], + "members":{ + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The unique identifier of the policy template used to create this policy.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal associated with this template-linked policy. Verified Permissions substitutes this principal for the ?principal placeholder in the policy template when it evaluates an authorization request.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource associated with this template-linked policy. Verified Permissions substitutes this resource for the ?resource placeholder in the policy template when it evaluates an authorization request.

    " + } + }, + "documentation":"

    Contains information about a policy created by instantiating a policy template.

    " + }, + "TemplateLinkedPolicyDefinitionDetail":{ + "type":"structure", + "required":["policyTemplateId"], + "members":{ + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The unique identifier of the policy template used to create this policy.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal associated with this template-linked policy. Verified Permissions substitutes this principal for the ?principal placeholder in the policy template when it evaluates an authorization request.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource associated with this template-linked policy. Verified Permissions substitutes this resource for the ?resource placeholder in the policy template when it evaluates an authorization request.

    " + } + }, + "documentation":"

    Contains information about a policy that was created by instantiating a policy template.

    This data type is used within a PolicyDefinitionDetail structure as part of a response to the GetPolicy operation.

    " + }, + "TemplateLinkedPolicyDefinitionItem":{ + "type":"structure", + "required":["policyTemplateId"], + "members":{ + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The unique identifier of the policy template used to create this policy.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal associated with this template-linked policy. Verified Permissions substitutes this principal for the ?principal placeholder in the policy template when it evaluates an authorization request.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource associated with this template-linked policy. Verified Permissions substitutes this resource for the ?resource placeholder in the policy template when it evaluates an authorization request.

    " + } + }, + "documentation":"

    Contains information about a policy created by instantiating a policy template.

    This data type is used within a PolicyDefinitionItem structure as part of a response to the ListPolicies operation.

    " + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "serviceCode":{ + "shape":"String", + "documentation":"

    The code for the Amazon Web Service that owns the quota.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    The quota code recognized by the Amazon Web Services Service Quotas service.

    " + } + }, + "documentation":"

    The request failed because it exceeded a throttling quota.

    ", + "exception":true, + "retryable":{"throttling":true} + }, + "TimestampFormat":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "Token":{ + "type":"string", + "max":131072, + "min":1, + "pattern":"[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+" + }, + "UpdateCognitoUserPoolConfiguration":{ + "type":"structure", + "required":["userPoolArn"], + "members":{ + "userPoolArn":{ + "shape":"UserPoolArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Cognito user pool associated with this identity source.

    " + }, + "clientIds":{ + "shape":"ClientIds", + "documentation":"

    The client ID of an app client that is configured for the specified Amazon Cognito user pool.

    " + } + }, + "documentation":"

    Contains configuration details of an Amazon Cognito user pool for use with an identity source.

    " + }, + "UpdateConfiguration":{ + "type":"structure", + "members":{ + "cognitoUserPoolConfiguration":{ + "shape":"UpdateCognitoUserPoolConfiguration", + "documentation":"

    Contains configuration details of an Amazon Cognito user pool.

    " + } + }, + "documentation":"

    Contains an updated configuration to replace the configuration in an existing identity source.

    At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

    You must specify a userPoolArn, and optionally, a ClientId.

    ", + "union":true + }, + "UpdateIdentitySourceInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "identitySourceId", + "updateConfiguration" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the identity source that you want to update.

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    Specifies the ID of the identity source that you want to update.

    " + }, + "updateConfiguration":{ + "shape":"UpdateConfiguration", + "documentation":"

    Specifies the details required to communicate with the identity provider (IdP) associated with this identity source.

    At this time, the only valid member of this structure is an Amazon Cognito user pool configuration.

    You must specify a userPoolArn, and optionally, a ClientId.

    " + }, + "principalEntityType":{ + "shape":"PrincipalEntityType", + "documentation":"

    Specifies the data type of principals generated for identities authenticated by the identity source.

    " + } + } + }, + "UpdateIdentitySourceOutput":{ + "type":"structure", + "required":[ + "createdDate", + "identitySourceId", + "lastUpdatedDate", + "policyStoreId" + ], + "members":{ + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the updated identity source was originally created.

    " + }, + "identitySourceId":{ + "shape":"IdentitySourceId", + "documentation":"

    The ID of the updated identity source.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the identity source was most recently updated.

    " + }, + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the updated identity source.

    " + } + } + }, + "UpdatePolicyDefinition":{ + "type":"structure", + "members":{ + "static":{ + "shape":"UpdateStaticPolicyDefinition", + "documentation":"

    Contains details about the updates to be applied to a static policy.

    " + } + }, + "documentation":"

    Contains information about updates to be applied to a policy.

    This data type is used as a request parameter in the UpdatePolicy operation.

    ", + "union":true + }, + "UpdatePolicyInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "definition" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy that you want to update.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    Specifies the ID of the policy that you want to update. To find this value, you can use ListPolicies.

    " + }, + "definition":{ + "shape":"UpdatePolicyDefinition", + "documentation":"

    Specifies the updated policy content that you want to replace on the specified policy. The content must be valid Cedar policy language text.

    You can change only the following elements from the policy definition:

    • The action referenced by the policy.

    • Any conditional clauses, such as when or unless clauses.

    You can't change the following elements:

    • Changing from static to templateLinked.

    • The effect (permit or forbid) of the policy.

    • The principal referenced by the policy.

    • The resource referenced by the policy.

    " + } + } + }, + "UpdatePolicyOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the policy that was updated.

    " + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"

    The ID of the policy that was updated.

    " + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"

    The type of the policy that was updated.

    " + }, + "principal":{ + "shape":"EntityIdentifier", + "documentation":"

    The principal specified in the policy's scope. This element isn't included in the response when Principal isn't present in the policy content.

    " + }, + "resource":{ + "shape":"EntityIdentifier", + "documentation":"

    The resource specified in the policy's scope. This element isn't included in the response when Resource isn't present in the policy content.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy was most recently updated.

    " + } + } + }, + "UpdatePolicyStoreInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "validationSettings" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that you want to update.

    " + }, + "validationSettings":{ + "shape":"ValidationSettings", + "documentation":"

    A structure that defines the validation settings that you want to enable for the policy store.

    " + } + } + }, + "UpdatePolicyStoreOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "arn", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the updated policy store.

    " + }, + "arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) of the updated policy store.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy store was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy store was most recently updated.

    " + } + } + }, + "UpdatePolicyTemplateInput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "statement" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    Specifies the ID of the policy store that contains the policy template that you want to update.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    Specifies the ID of the policy template that you want to update.

    " + }, + "description":{ + "shape":"PolicyTemplateDescription", + "documentation":"

    Specifies a new description to apply to the policy template.

    " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

    Specifies new statement content written in Cedar policy language to replace the current body of the policy template.

    You can change only the following elements of the policy body:

    • The action referenced by the policy template.

    • Any conditional clauses, such as when or unless clauses.

    You can't change the following elements:

    • The effect (permit or forbid) of the policy template.

    • The principal referenced by the policy template.

    • The resource referenced by the policy template.

    " + } + } + }, + "UpdatePolicyTemplateOutput":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyTemplateId", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"

    The ID of the policy store that contains the updated policy template.

    " + }, + "policyTemplateId":{ + "shape":"PolicyTemplateId", + "documentation":"

    The ID of the updated policy template.

    " + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy template was originally created.

    " + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"

    The date and time that the policy template was most recently updated.

    " + } + } + }, + "UpdateStaticPolicyDefinition":{ + "type":"structure", + "required":["statement"], + "members":{ + "description":{ + "shape":"StaticPolicyDescription", + "documentation":"

    Specifies the description to be added to or replaced on the static policy.

    " + }, + "statement":{ + "shape":"PolicyStatement", + "documentation":"

    Specifies the Cedar policy language text to be added to or replaced on the static policy.

    You can change only the following elements from the original content:

    • The action referenced by the policy.

    • Any conditional clauses, such as when or unless clauses.

    You can't change the following elements:

    • Changing from StaticPolicy to TemplateLinkedPolicy.

    • The effect (permit or forbid) of the policy.

    • The principal referenced by the policy.

    • The resource referenced by the policy.

    " + } + }, + "documentation":"

    Contains information about an update to a static policy.

    " + }, + "UserPoolArn":{ + "type":"string", + "max":255, + "min":1, + "pattern":"arn:[a-zA-Z0-9-]+:cognito-idp:(([a-zA-Z0-9-]+:\\d{12}:userpool/[\\w-]+_[0-9a-zA-Z]+))" + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    The list of fields that aren't valid.

    " + } + }, + "documentation":"

    The request failed because one or more input parameters don't satisfy their constraint requirements. The output is provided as a list of fields and a reason for each field that isn't valid.

    The possible reasons include the following:

    • UnrecognizedEntityType

      The policy includes an entity type that isn't found in the schema.

    • UnrecognizedActionId

      The policy includes an action id that isn't found in the schema.

    • InvalidActionApplication

      The policy includes an action that, according to the schema, doesn't support the specified principal and resource.

    • UnexpectedType

      The policy included an operand that isn't a valid type for the specified operation.

    • IncompatibleTypes

      The types of elements included in a set, or the types of expressions used in an if...then...else clause aren't compatible in this context.

    • MissingAttribute

      The policy attempts to access a record or entity attribute that isn't specified in the schema. Test for the existence of the attribute first before attempting to access its value. For more information, see the has (presence of attribute test) operator in the Cedar Policy Language Guide.

    • UnsafeOptionalAttributeAccess

      The policy attempts to access a record or entity attribute that is optional and isn't guaranteed to be present. Test for the existence of the attribute first before attempting to access its value. For more information, see the has (presence of attribute test) operator in the Cedar Policy Language Guide.

    • ImpossiblePolicy

      Cedar has determined that a policy condition always evaluates to false. If the policy is always false, it can never apply to any query, and so it can never affect an authorization decision.

    • WrongNumberArguments

      The policy references an extension type with the wrong number of arguments.

    • FunctionArgumentValidationError

      Cedar couldn't parse the argument passed to an extension type. For example, a string that is to be parsed as an IPv4 address can contain only digits and the period character.

    ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "path", + "message" + ], + "members":{ + "path":{ + "shape":"String", + "documentation":"

    The path to the specific element that Verified Permissions found to be not valid.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    Describes the policy validation error.

    " + } + }, + "documentation":"

    Details about a field that failed policy validation.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationMode":{ + "type":"string", + "enum":[ + "OFF", + "STRICT" + ] + }, + "ValidationSettings":{ + "type":"structure", + "required":["mode"], + "members":{ + "mode":{ + "shape":"ValidationMode", + "documentation":"

    The validation mode currently configured for this policy store. The valid values are:

    • OFF – Neither Verified Permissions nor Cedar perform any validation on policies. No validation errors are reported by either service.

    • STRICT – Requires a schema to be present in the policy store. Cedar performs validation on all submitted new or updated static policies and policy templates. Any that fail validation are rejected and Cedar doesn't store them in the policy store.

    If Mode=STRICT and the policy store doesn't contain a schema, Verified Permissions rejects all static policies and policy templates because there is no schema to validate against.

    To submit a static policy or policy template without a schema, you must turn off validation.

    " + } + }, + "documentation":"

    A structure that contains Cedar policy validation settings for the policy store. The validation mode determines which validation failures that Cedar considers serious enough to block acceptance of a new or edited static policy or policy template.

    This data type is used as a request parameter in the CreatePolicyStore and UpdatePolicyStore operations.

    " + } + }, + "documentation":"

    Amazon Verified Permissions is a permissions management service from Amazon Web Services. You can use Verified Permissions to manage permissions for your application, and authorize user access based on those permissions. Using Verified Permissions, application developers can grant access based on information about the users, resources, and requested actions. You can also evaluate additional information like group membership, attributes of the resources, and session context, such as time of request and IP addresses. Verified Permissions manages these permissions by letting you create and store authorization policies for your applications, such as consumer-facing web sites and enterprise business systems.

    Verified Permissions uses Cedar as the policy language to express your permission requirements. Cedar supports both role-based access control (RBAC) and attribute-based access control (ABAC) authorization models.

    For more information about configuring, administering, and using Amazon Verified Permissions in your applications, see the Amazon Verified Permissions User Guide.

    For more information about the Cedar policy language, see the Cedar Policy Language Guide.

    When you write Cedar policies that reference principals, resources and actions, you can define the unique identifiers used for each of those elements. We strongly recommend that you follow these best practices:

    • Use values like universally unique identifiers (UUIDs) for all principal and resource identifiers.

      For example, if user jane leaves the company, and you later let someone else use the name jane, then that new user automatically gets access to everything granted by policies that still reference User::\"jane\". Cedar can’t distinguish between the new user and the old. This applies to both principal and resource identifiers. Always use identifiers that are guaranteed unique and never reused to ensure that you don’t unintentionally grant access because of the presence of an old identifier in a policy.

      Where you use a UUID for an entity, we recommend that you follow it with the // comment specifier and the ‘friendly’ name of your entity. This helps to make your policies easier to understand. For example: principal == User::\"a1b2c3d4-e5f6-a1b2-c3d4-EXAMPLE11111\", // alice

    • Do not include personally identifying, confidential, or sensitive information as part of the unique identifier for your principals or resources. These identifiers are included in log entries shared in CloudTrail trails.

    Several operations return structures that appear similar, but have different purposes. As new functionality is added to the product, the structure used in a parameter of one operation might need to change in a way that wouldn't make sense for the same parameter in a different operation. To help you understand the purpose of each, the following naming convention is used for the structures:

    • Parameters that end in Detail are used in Get operations.

    • Parameters that end in Item are used in List operations.

    • Parameters that use neither suffix are used in the mutating (create and update) operations.

    " +} diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/waiters-2.json b/services/verifiedpermissions/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index 9deced0bf90e..6ac6d8f5dfcf 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index fcbfe87499cf..7dfb6f41dffd 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 463fa99b1cb7..f9faabc9e042 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 2a97fe05a3d6..c2e91f94f26d 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index e8541a93b797..d251b2c42e75 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -976,6 +976,37 @@ "type":"integer", "min":0 }, + "AWSManagedRulesACFPRuleSet":{ 
+ "type":"structure", + "required":[ + "CreationPath", + "RegistrationPagePath", + "RequestInspection" + ], + "members":{ + "CreationPath":{ + "shape":"CreationPathString", + "documentation":"

    The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST requests.

    For example, for the URL https://example.com/web/signup, you would provide the path /web/signup.

    " + }, + "RegistrationPagePath":{ + "shape":"RegistrationPagePathString", + "documentation":"

    The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users.

    This page must accept GET text/html requests.

    For example, for the URL https://example.com/web/register, you would provide the path /web/register.

    " + }, + "RequestInspection":{ + "shape":"RequestInspectionACFP", + "documentation":"

    The criteria for inspecting account creation requests, used by the ACFP rule group to validate and track account creation attempts.

    " + }, + "ResponseInspection":{ + "shape":"ResponseInspection", + "documentation":"

    The criteria for inspecting responses to account creation requests, used by the ACFP rule group to track account creation success rates.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    The ACFP rule group evaluates the responses that your protected resources send back to client account creation attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that have had too many successful account creation attempts in a short amount of time.

    " + }, + "EnableRegexInPath":{ + "shape":"Boolean", + "documentation":"

    Allow the use of regular expressions in the registration page path and the account creation path.

    " + } + }, + "documentation":"

    Details for your use of the account creation fraud prevention managed rule group, AWSManagedRulesACFPRuleSet. This configuration is used in ManagedRuleGroupConfig.

    " + }, "AWSManagedRulesATPRuleSet":{ "type":"structure", "required":["LoginPath"], @@ -990,7 +1021,11 @@ }, "ResponseInspection":{ "shape":"ResponseInspection", - "documentation":"

    The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates.

    The ATP rule group evaluates the responses that your protected resources send back to client login attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that submit too many failed login attempts in a short amount of time.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " + "documentation":"

    The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    The ATP rule group evaluates the responses that your protected resources send back to client login attempts, keeping count of successful and failed attempts for each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that have had too many failed login attempts in a short amount of time.

    " + }, + "EnableRegexInPath":{ + "shape":"Boolean", + "documentation":"

    Allow the use of regular expressions in the login page path.

    " } }, "documentation":"

    Details for your use of the account takeover prevention managed rule group, AWSManagedRulesATPRuleSet. This configuration is used in ManagedRuleGroupConfig.

    " @@ -1029,6 +1064,21 @@ "EXCLUDED_AS_COUNT" ] }, + "AddressField":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"FieldIdentifier", + "documentation":"

    The name of a single primary address field.

    How you specify the address fields depends on the request inspection payload type.

    • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"primaryaddressline1\": \"THE_ADDRESS1\", \"primaryaddressline2\": \"THE_ADDRESS2\", \"primaryaddressline3\": \"THE_ADDRESS3\" } }, the address field identifiers are /form/primaryaddressline1, /form/primaryaddressline2, and /form/primaryaddressline3.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with input elements named primaryaddressline1, primaryaddressline2, and primaryaddressline3, the address field identifiers are primaryaddressline1, primaryaddressline2, and primaryaddressline3.

    " + } + }, + "documentation":"

    The name of a field in the request payload that contains part or all of your customer's primary physical address.

    This data type is used in the RequestInspectionACFP data type.

    " + }, + "AddressFields":{ + "type":"list", + "member":{"shape":"AddressField"} + }, "All":{ "type":"structure", "members":{ @@ -1842,6 +1892,12 @@ } } }, + "CreationPathString":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, "CustomHTTPHeader":{ "type":"structure", "required":[ @@ -2249,6 +2305,17 @@ } }, "DownloadUrl":{"type":"string"}, + "EmailField":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"FieldIdentifier", + "documentation":"

    The name of the email field.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"email\": \"THE_EMAIL\" } }, the email field specification is /form/email.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named email1, the email field specification is email1.

    " + } + }, + "documentation":"

    The name of the field in the request payload that contains your customer's email.

    This data type is used in the RequestInspectionACFP data type.

    " + }, "EntityDescription":{ "type":"string", "max":256, @@ -2363,7 +2430,7 @@ }, "HeaderOrder":{ "shape":"HeaderOrder", - "documentation":"

    Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using commas and no added spaces.

    Matches against the header order string are case insensitive.

    " + "documentation":"

    Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer.

    " } }, "documentation":"

    The part of the web request that you want WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of the web request, create a separate rule statement for each component.

    Example JSON for a QueryString field to match:

    \"FieldToMatch\": { \"QueryString\": {} }

    Example JSON for a Method field to match specification:

    \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }

    " @@ -2919,7 +2986,7 @@ }, "ApplicationIntegrationURL":{ "shape":"OutputUrl", - "documentation":"

    The URL to use in SDK integrations with Amazon Web Services managed rule groups. For example, you can use the integration SDKs with the account takeover prevention managed rule group AWSManagedRulesATPRuleSet. This is only populated if you are using a rule group in your web ACL that integrates with your applications in this way. For more information, see WAF client application integration in the WAF Developer Guide.

    " + "documentation":"

    The URL to use in SDK integrations with Amazon Web Services managed rule groups. For example, you can use the integration SDKs with the account takeover prevention managed rule group AWSManagedRulesATPRuleSet and the account creation fraud prevention managed rule group AWSManagedRulesACFPRuleSet. This is only populated if you are using a rule group in your web ACL that integrates with your applications in this way. For more information, see WAF client application integration in the WAF Developer Guide.

    " } } }, @@ -3007,7 +3074,7 @@ "documentation":"

    What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

    The options for oversize handling are the following:

    • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

    • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

    • NO_MATCH - Treat the web request as not matching the rule statement.

    " } }, - "documentation":"

    Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using commas and no added spaces.

    Matches against the header order string are case insensitive.

    " + "documentation":"

    Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer.

    " }, "HeaderValue":{"type":"string"}, "Headers":{ @@ -3734,7 +3801,7 @@ }, "RedactedFields":{ "shape":"RedactedFields", - "documentation":"

    The parts of the request that you want to keep out of the logs. For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED.

    You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, Method, and JsonBody.

    " + "documentation":"

    The parts of the request that you want to keep out of the logs.

    For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting.

    Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch.

    You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method.

    " }, "ManagedByFirewallManager":{ "shape":"Boolean", @@ -3832,19 +3899,19 @@ }, "PayloadType":{ "shape":"PayloadType", - "documentation":"

    Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet RequestInspection.

    ", + "documentation":"

    Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet.

    ", "deprecated":true, "deprecatedMessage":"Deprecated. Use AWSManagedRulesATPRuleSet RequestInspection PayloadType" }, "UsernameField":{ "shape":"UsernameField", - "documentation":"

    Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet RequestInspection.

    ", + "documentation":"

    Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet.

    ", "deprecated":true, "deprecatedMessage":"Deprecated. Use AWSManagedRulesATPRuleSet RequestInspection UsernameField" }, "PasswordField":{ "shape":"PasswordField", - "documentation":"

    Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet RequestInspection.

    ", + "documentation":"

    Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet.

    ", "deprecated":true, "deprecatedMessage":"Deprecated. Use AWSManagedRulesATPRuleSet RequestInspection PasswordField" }, @@ -3855,9 +3922,13 @@ "AWSManagedRulesATPRuleSet":{ "shape":"AWSManagedRulesATPRuleSet", "documentation":"

    Additional configuration for using the account takeover prevention (ATP) managed rule group, AWSManagedRulesATPRuleSet. Use this to provide login request information to the rule group. For web ACLs that protect CloudFront distributions, use this to also provide the information about how your distribution responds to login requests.

    This configuration replaces the individual configuration fields in ManagedRuleGroupConfig and provides additional feature configuration.

    For information about using the ATP managed rule group, see WAF Fraud Control account takeover prevention (ATP) rule group and WAF Fraud Control account takeover prevention (ATP) in the WAF Developer Guide.

    " + }, + "AWSManagedRulesACFPRuleSet":{ + "shape":"AWSManagedRulesACFPRuleSet", + "documentation":"

    Additional configuration for using the account creation fraud prevention (ACFP) managed rule group, AWSManagedRulesACFPRuleSet. Use this to provide account creation request information to the rule group. For web ACLs that protect CloudFront distributions, use this to also provide the information about how your distribution responds to account creation requests.

    For information about using the ACFP managed rule group, see WAF Fraud Control account creation fraud prevention (ACFP) rule group and WAF Fraud Control account creation fraud prevention (ACFP) in the WAF Developer Guide.

    " } }, - "documentation":"

    Additional information that's used by a managed rule group. Many managed rule groups don't require this.

    Use the AWSManagedRulesATPRuleSet configuration object for the account takeover prevention managed rule group, to provide information such as the sign-in page of your application and the type of content to accept or reject from the client.

    Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

    For example specifications, see the examples section of CreateWebACL.

    " + "documentation":"

    Additional information that's used by a managed rule group. Many managed rule groups don't require this.

    The rule groups used for intelligent threat mitigation require additional configuration:

    • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

    • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

    • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

    For example specifications, see the examples section of CreateWebACL.

    " }, "ManagedRuleGroupConfigs":{ "type":"list", @@ -3892,14 +3963,14 @@ }, "ManagedRuleGroupConfigs":{ "shape":"ManagedRuleGroupConfigs", - "documentation":"

    Additional information that's used by a managed rule group. Many managed rule groups don't require this.

    Use the AWSManagedRulesATPRuleSet configuration object for the account takeover prevention managed rule group, to provide information such as the sign-in page of your application and the type of content to accept or reject from the client.

    Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

    " + "documentation":"

    Additional information that's used by a managed rule group. Many managed rule groups don't require this.

    The rule groups used for intelligent threat mitigation require additional configuration:

    • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

    • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

    • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

    " }, "RuleActionOverrides":{ "shape":"RuleActionOverrides", "documentation":"

    Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

    You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

    " } }, - "documentation":"

    A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

    You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet or the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet. For more information, see WAF Pricing.

    " + "documentation":"

    A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

    You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. For more information, see WAF Pricing.

    " }, "ManagedRuleGroupSummaries":{ "type":"list", @@ -4223,7 +4294,8 @@ "ATP_RULE_SET_RESPONSE_INSPECTION", "ASSOCIATED_RESOURCE_TYPE", "SCOPE_DOWN", - "CUSTOM_KEYS" + "CUSTOM_KEYS", + "ACP_RULE_SET_RESPONSE_INSPECTION" ] }, "ParameterExceptionParameter":{ @@ -4236,10 +4308,10 @@ "members":{ "Identifier":{ "shape":"FieldIdentifier", - "documentation":"

    The name of the password field. For example /form/password.

    " + "documentation":"

    The name of the password field.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"password\": \"THE_PASSWORD\" } }, the password field specification is /form/password.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named password1, the password field specification is password1.

    " } }, - "documentation":"

    Details about your login page password field for request inspection, used in the AWSManagedRulesATPRuleSet RequestInspection configuration.

    " + "documentation":"

    The name of the field in the request payload that contains your customer's password.

    This data type is used in the RequestInspection and RequestInspectionACFP data types.

    " }, "PayloadType":{ "type":"string", @@ -4248,6 +4320,21 @@ "FORM_ENCODED" ] }, + "PhoneNumberField":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"FieldIdentifier", + "documentation":"

    The name of a single primary phone number field.

    How you specify the phone number fields depends on the request inspection payload type.

    • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"primaryphoneline1\": \"THE_PHONE1\", \"primaryphoneline2\": \"THE_PHONE2\", \"primaryphoneline3\": \"THE_PHONE3\" } }, the phone number field identifiers are /form/primaryphoneline1, /form/primaryphoneline2, and /form/primaryphoneline3.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with input elements named primaryphoneline1, primaryphoneline2, and primaryphoneline3, the phone number field identifiers are primaryphoneline1, primaryphoneline2, and primaryphoneline3.

    " + } + }, + "documentation":"

    The name of a field in the request payload that contains part or all of your customer's primary phone number.

    This data type is used in the RequestInspectionACFP data type.

    " + }, + "PhoneNumberFields":{ + "type":"list", + "member":{"shape":"PhoneNumberField"} + }, "Platform":{ "type":"string", "enum":[ @@ -4709,6 +4796,12 @@ "min":1, "pattern":".*" }, + "RegistrationPagePathString":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, "RegularExpressionList":{ "type":"list", "member":{"shape":"Regex"} @@ -4762,15 +4855,46 @@ }, "UsernameField":{ "shape":"UsernameField", - "documentation":"

    Details about your login page username field.

    How you specify this depends on the payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"login\": { \"username\": \"THE_USERNAME\", \"password\": \"THE_PASSWORD\" } }, the username field specification is /login/username and the password field specification is /login/password.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with input elements named username1 and password1, the username field specification is username1 and the password field specification is password1.

    " + "documentation":"

    The name of the field in the request payload that contains your customer's username.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"username\": \"THE_USERNAME\" } }, the username field specification is /form/username.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named username1, the username field specification is username1.

    " }, "PasswordField":{ "shape":"PasswordField", - "documentation":"

    Details about your login page password field.

    How you specify this depends on the payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"login\": { \"username\": \"THE_USERNAME\", \"password\": \"THE_PASSWORD\" } }, the username field specification is /login/username and the password field specification is /login/password.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with input elements named username1 and password1, the username field specification is username1 and the password field specification is password1.

    " + "documentation":"

    The name of the field in the request payload that contains your customer's password.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"password\": \"THE_PASSWORD\" } }, the password field specification is /form/password.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named password1, the password field specification is password1.

    " } }, "documentation":"

    The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage.

    This is part of the AWSManagedRulesATPRuleSet configuration in ManagedRuleGroupConfig.

    In these settings, you specify how your application accepts login attempts by providing the request payload type and the names of the fields within the request body where the username and password are provided.

    " }, + "RequestInspectionACFP":{ + "type":"structure", + "required":["PayloadType"], + "members":{ + "PayloadType":{ + "shape":"PayloadType", + "documentation":"

    The payload type for your account creation endpoint, either JSON or form encoded.

    " + }, + "UsernameField":{ + "shape":"UsernameField", + "documentation":"

    The name of the field in the request payload that contains your customer's username.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"username\": \"THE_USERNAME\" } }, the username field specification is /form/username.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named username1, the username field specification is username1.

    " + }, + "PasswordField":{ + "shape":"PasswordField", + "documentation":"

    The name of the field in the request payload that contains your customer's password.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"password\": \"THE_PASSWORD\" } }, the password field specification is /form/password.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named password1, the password field specification is password1.

    " + }, + "EmailField":{ + "shape":"EmailField", + "documentation":"

    The name of the field in the request payload that contains your customer's email.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"email\": \"THE_EMAIL\" } }, the email field specification is /form/email.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named email1, the email field specification is email1.

    " + }, + "PhoneNumberFields":{ + "shape":"PhoneNumberFields", + "documentation":"

    The names of the fields in the request payload that contain your customer's primary phone number.

    Order the phone number fields in the array exactly as they are ordered in the request payload.

    How you specify the phone number fields depends on the request inspection payload type.

    • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"primaryphoneline1\": \"THE_PHONE1\", \"primaryphoneline2\": \"THE_PHONE2\", \"primaryphoneline3\": \"THE_PHONE3\" } }, the phone number field identifiers are /form/primaryphoneline1, /form/primaryphoneline2, and /form/primaryphoneline3.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with input elements named primaryphoneline1, primaryphoneline2, and primaryphoneline3, the phone number field identifiers are primaryphoneline1, primaryphoneline2, and primaryphoneline3.

    " + }, + "AddressFields":{ + "shape":"AddressFields", + "documentation":"

    The names of the fields in the request payload that contain your customer's primary physical address.

    Order the address fields in the array exactly as they are ordered in the request payload.

    How you specify the address fields depends on the request inspection payload type.

    • For JSON payloads, specify the field identifiers in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"primaryaddressline1\": \"THE_ADDRESS1\", \"primaryaddressline2\": \"THE_ADDRESS2\", \"primaryaddressline3\": \"THE_ADDRESS3\" } }, the address field identifiers are /form/primaryaddressline1, /form/primaryaddressline2, and /form/primaryaddressline3.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with input elements named primaryaddressline1, primaryaddressline2, and primaryaddressline3, the address field identifiers are primaryaddressline1, primaryaddressline2, and primaryaddressline3.

    " + } + }, + "documentation":"

    The criteria for inspecting account creation requests, used by the ACFP rule group to validate and track account creation attempts.

    This is part of the AWSManagedRulesACFPRuleSet configuration in ManagedRuleGroupConfig.

    In these settings, you specify how your application accepts account creation attempts by providing the request payload type and the names of the fields within the request body where the username, password, email, and primary address and phone number fields are provided.

    " + }, "ResourceArn":{ "type":"string", "max":2048, @@ -4812,22 +4936,22 @@ "members":{ "StatusCode":{ "shape":"ResponseInspectionStatusCode", - "documentation":"

    Configures inspection of the response status code.

    " + "documentation":"

    Configures inspection of the response status code for success and failure indicators.

    " }, "Header":{ "shape":"ResponseInspectionHeader", - "documentation":"

    Configures inspection of the response header.

    " + "documentation":"

    Configures inspection of the response header for success and failure indicators.

    " }, "BodyContains":{ "shape":"ResponseInspectionBodyContains", - "documentation":"

    Configures inspection of the response body. WAF can inspect the first 65,536 bytes (64 KB) of the response body.

    " + "documentation":"

    Configures inspection of the response body for success and failure indicators. WAF can inspect the first 65,536 bytes (64 KB) of the response body.

    " }, "Json":{ "shape":"ResponseInspectionJson", - "documentation":"

    Configures inspection of the response JSON. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON.

    " + "documentation":"

    Configures inspection of the response JSON for success and failure indicators. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON.

    " } }, - "documentation":"

    The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates.

    The ATP rule group evaluates the responses that your protected resources send back to client login attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses that submit too many failed login attempts in a short amount of time.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    This is part of the AWSManagedRulesATPRuleSet configuration in ManagedRuleGroupConfig.

    Enable login response inspection by configuring exactly one component of the response to inspect. You can't configure more than one. If you don't configure any of the response inspection options, response inspection is disabled.

    " + "documentation":"

    The criteria for inspecting responses to login requests and account creation requests, used by the ATP and ACFP rule groups to track login and account creation success and failure rates.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    The rule groups evaluate the responses that your protected resources send back to client login and account creation attempts, keeping count of successful and failed attempts from each IP address and client session. Using this information, the rule group labels and mitigates requests from client sessions and IP addresses with too much suspicious activity in a short amount of time.

    This is part of the AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet configurations in ManagedRuleGroupConfig.

    Enable response inspection by configuring exactly one component of the response to inspect, for example, Header or StatusCode. You can't configure more than one component for inspection. If you don't configure any of the response inspection options, response inspection is disabled.

    " }, "ResponseInspectionBodyContains":{ "type":"structure", @@ -4838,14 +4962,14 @@ "members":{ "SuccessStrings":{ "shape":"ResponseInspectionBodyContainsSuccessStrings", - "documentation":"

    Strings in the body of the response that indicate a successful login attempt. To be counted as a successful login, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

    JSON example: \"SuccessStrings\": [ \"Login successful\", \"Welcome to our site!\" ]

    " + "documentation":"

    Strings in the body of the response that indicate a successful login or account creation attempt. To be counted as a success, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

    JSON examples: \"SuccessStrings\": [ \"Login successful\" ] and \"SuccessStrings\": [ \"Account creation successful\", \"Welcome to our site!\" ]

    " }, "FailureStrings":{ "shape":"ResponseInspectionBodyContainsFailureStrings", - "documentation":"

    Strings in the body of the response that indicate a failed login attempt. To be counted as a failed login, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

    JSON example: \"FailureStrings\": [ \"Login failed\" ]

    " + "documentation":"

    Strings in the body of the response that indicate a failed login or account creation attempt. To be counted as a failure, the string can be anywhere in the body and must be an exact match, including case. Each string must be unique among the success and failure strings.

    JSON example: \"FailureStrings\": [ \"Request failed\" ]

    " } }, - "documentation":"

    Configures inspection of the response body. WAF can inspect the first 65,536 bytes (64 KB) of the response body. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " + "documentation":"

    Configures inspection of the response body. WAF can inspect the first 65,536 bytes (64 KB) of the response body. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " }, "ResponseInspectionBodyContainsFailureStrings":{ "type":"list", @@ -4869,18 +4993,18 @@ "members":{ "Name":{ "shape":"ResponseInspectionHeaderName", - "documentation":"

    The name of the header to match against. The name must be an exact match, including case.

    JSON example: \"Name\": [ \"LoginResult\" ]

    " + "documentation":"

    The name of the header to match against. The name must be an exact match, including case.

    JSON example: \"Name\": [ \"RequestResult\" ]

    " }, "SuccessValues":{ "shape":"ResponseInspectionHeaderSuccessValues", - "documentation":"

    Values in the response header with the specified name that indicate a successful login attempt. To be counted as a successful login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON example: \"SuccessValues\": [ \"LoginPassed\", \"Successful login\" ]

    " + "documentation":"

    Values in the response header with the specified name that indicate a successful login or account creation attempt. To be counted as a success, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON examples: \"SuccessValues\": [ \"LoginPassed\", \"Successful login\" ] and \"SuccessValues\": [ \"AccountCreated\", \"Successful account creation\" ]

    " }, "FailureValues":{ "shape":"ResponseInspectionHeaderFailureValues", - "documentation":"

    Values in the response header with the specified name that indicate a failed login attempt. To be counted as a failed login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON example: \"FailureValues\": [ \"LoginFailed\", \"Failed login\" ]

    " + "documentation":"

    Values in the response header with the specified name that indicate a failed login or account creation attempt. To be counted as a failure, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON examples: \"FailureValues\": [ \"LoginFailed\", \"Failed login\" ] and \"FailureValues\": [ \"AccountCreationFailed\" ]

    " } }, - "documentation":"

    Configures inspection of the response header. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " + "documentation":"

    Configures inspection of the response header. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " }, "ResponseInspectionHeaderFailureValues":{ "type":"list", @@ -4910,18 +5034,18 @@ "members":{ "Identifier":{ "shape":"FieldIdentifier", - "documentation":"

    The identifier for the value to match against in the JSON. The identifier must be an exact match, including case.

    JSON example: \"Identifier\": [ \"/login/success\" ]

    " + "documentation":"

    The identifier for the value to match against in the JSON. The identifier must be an exact match, including case.

    JSON examples: \"Identifier\": [ \"/login/success\" ] and \"Identifier\": [ \"/sign-up/success\" ]

    " }, "SuccessValues":{ "shape":"ResponseInspectionJsonSuccessValues", - "documentation":"

    Values for the specified identifier in the response JSON that indicate a successful login attempt. To be counted as a successful login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON example: \"SuccessValues\": [ \"True\", \"Succeeded\" ]

    " + "documentation":"

    Values for the specified identifier in the response JSON that indicate a successful login or account creation attempt. To be counted as a success, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON example: \"SuccessValues\": [ \"True\", \"Succeeded\" ]

    " }, "FailureValues":{ "shape":"ResponseInspectionJsonFailureValues", - "documentation":"

    Values for the specified identifier in the response JSON that indicate a failed login attempt. To be counted as a failed login, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON example: \"FailureValues\": [ \"False\", \"Failed\" ]

    " + "documentation":"

    Values for the specified identifier in the response JSON that indicate a failed login or account creation attempt. To be counted as a failure, the value must be an exact match, including case. Each value must be unique among the success and failure values.

    JSON example: \"FailureValues\": [ \"False\", \"Failed\" ]

    " } }, - "documentation":"

    Configures inspection of the response JSON. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " + "documentation":"

    Configures inspection of the response JSON. WAF can inspect the first 65,536 bytes (64 KB) of the response JSON. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " }, "ResponseInspectionJsonFailureValues":{ "type":"list", @@ -4944,14 +5068,14 @@ "members":{ "SuccessCodes":{ "shape":"ResponseInspectionStatusCodeSuccessCodes", - "documentation":"

    Status codes in the response that indicate a successful login attempt. To be counted as a successful login, the response status code must match one of these. Each code must be unique among the success and failure status codes.

    JSON example: \"SuccessCodes\": [ 200, 201 ]

    " + "documentation":"

    Status codes in the response that indicate a successful login or account creation attempt. To be counted as a success, the response status code must match one of these. Each code must be unique among the success and failure status codes.

    JSON example: \"SuccessCodes\": [ 200, 201 ]

    " }, "FailureCodes":{ "shape":"ResponseInspectionStatusCodeFailureCodes", - "documentation":"

    Status codes in the response that indicate a failed login attempt. To be counted as a failed login, the response status code must match one of these. Each code must be unique among the success and failure status codes.

    JSON example: \"FailureCodes\": [ 400, 404 ]

    " + "documentation":"

    Status codes in the response that indicate a failed login or account creation attempt. To be counted as a failure, the response status code must match one of these. Each code must be unique among the success and failure status codes.

    JSON example: \"FailureCodes\": [ 400, 404 ]

    " } }, - "documentation":"

    Configures inspection of the response status code. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " + "documentation":"

    Configures inspection of the response status code. This is part of the ResponseInspection configuration for AWSManagedRulesATPRuleSet and AWSManagedRulesACFPRuleSet.

    Response inspection is available only in web ACLs that protect Amazon CloudFront distributions.

    " }, "ResponseInspectionStatusCodeFailureCodes":{ "type":"list", @@ -5423,7 +5547,7 @@ }, "ManagedRuleGroupStatement":{ "shape":"ManagedRuleGroupStatement", - "documentation":"

    A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

    You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet or the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet. For more information, see WAF Pricing.

    " + "documentation":"

    A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

    You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

    You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. For more information, see WAF Pricing.

    " }, "LabelMatchStatement":{ "shape":"LabelMatchStatement", @@ -5925,10 +6049,10 @@ "members":{ "Identifier":{ "shape":"FieldIdentifier", - "documentation":"

    The name of the username field. For example /form/username.

    " + "documentation":"

    The name of the username field.

    How you specify this depends on the request inspection payload type.

    • For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation JavaScript Object Notation (JSON) Pointer.

      For example, for the JSON payload { \"form\": { \"username\": \"THE_USERNAME\" } }, the username field specification is /form/username.

    • For form encoded payload types, use the HTML form names.

      For example, for an HTML form with the input element named username1, the username field specification is username1.

    " } }, - "documentation":"

    Details about your login page username field for request inspection, used in the AWSManagedRulesATPRuleSet RequestInspection configuration.

    " + "documentation":"

    The name of the field in the request payload that contains your customer's username.

    This data type is used in the RequestInspection and RequestInspectionACFP data types.

    " }, "VendorName":{ "type":"string", diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index 8867a6a788a8..ed68e44078be 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json b/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json index e2d9e2d68289..fd24c74bd14a 100644 --- a/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json +++ b/services/wellarchitected/src/main/resources/codegen-resources/paginators-1.json @@ -50,6 +50,21 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListProfileNotifications": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListProfileShares": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListProfiles": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListShareInvitations": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json index 8362187968f2..5ccdcc1bca3b 100644 --- a/services/wellarchitected/src/main/resources/codegen-resources/service-2.json +++ b/services/wellarchitected/src/main/resources/codegen-resources/service-2.json @@ -30,6 +30,23 @@ ], "documentation":"

    Associate a lens to a workload.

    Up to 10 lenses can be associated with a workload in a single API operation. A maximum of 20 lenses can be associated with a workload.

    Disclaimer

    By accessing and/or applying custom lenses created by another Amazon Web Services user or account, you acknowledge that custom lenses created by other users and shared with you are Third Party Content as defined in the Amazon Web Services Customer Agreement.

    " }, + "AssociateProfiles":{ + "name":"AssociateProfiles", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/associateProfiles" + }, + "input":{"shape":"AssociateProfilesInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Associate a profile with a workload.

    " + }, "CreateLensShare":{ "name":"CreateLensShare", "http":{ @@ -87,6 +104,43 @@ ], "documentation":"

    Create a milestone for an existing workload.

    " }, + "CreateProfile":{ + "name":"CreateProfile", + "http":{ + "method":"POST", + "requestUri":"/profiles" + }, + "input":{"shape":"CreateProfileInput"}, + "output":{"shape":"CreateProfileOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Create a profile.

    " + }, + "CreateProfileShare":{ + "name":"CreateProfileShare", + "http":{ + "method":"POST", + "requestUri":"/profiles/{ProfileArn}/shares" + }, + "input":{"shape":"CreateProfileShareInput"}, + "output":{"shape":"CreateProfileShareOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Create a profile share.

    " + }, "CreateWorkload":{ "name":"CreateWorkload", "http":{ @@ -159,6 +213,40 @@ ], "documentation":"

    Delete a lens share.

    After the lens share is deleted, Amazon Web Services accounts, users, organizations, and organizational units (OUs) that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.

    Disclaimer

    By sharing your custom lenses with other Amazon Web Services accounts, you acknowledge that Amazon Web Services will make your custom lenses available to those other accounts. Those other accounts may continue to access and use your shared custom lenses even if you delete the custom lenses from your own Amazon Web Services account or terminate your Amazon Web Services account.

    " }, + "DeleteProfile":{ + "name":"DeleteProfile", + "http":{ + "method":"DELETE", + "requestUri":"/profiles/{ProfileArn}" + }, + "input":{"shape":"DeleteProfileInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Delete a profile.

    Disclaimer

    By sharing your profile with other Amazon Web Services accounts, you acknowledge that Amazon Web Services will make your profile available to those other accounts. Those other accounts may continue to access and use your shared profile even if you delete the profile from your own Amazon Web Services account or terminate your Amazon Web Services account.

    " + }, + "DeleteProfileShare":{ + "name":"DeleteProfileShare", + "http":{ + "method":"DELETE", + "requestUri":"/profiles/{ProfileArn}/shares/{ShareId}" + }, + "input":{"shape":"DeleteProfileShareInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Delete a profile share.

    " + }, "DeleteWorkload":{ "name":"DeleteWorkload", "http":{ @@ -210,6 +298,23 @@ ], "documentation":"

    Disassociate a lens from a workload.

    Up to 10 lenses can be disassociated from a workload in a single API operation.

    The Amazon Web Services Well-Architected Framework lens (wellarchitected) cannot be removed from a workload.

    " }, + "DisassociateProfiles":{ + "name":"DisassociateProfiles", + "http":{ + "method":"PATCH", + "requestUri":"/workloads/{WorkloadId}/disassociateProfiles" + }, + "input":{"shape":"DisassociateProfilesInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Disassociate a profile from a workload.

    " + }, "ExportLens":{ "name":"ExportLens", "http":{ @@ -346,6 +451,40 @@ ], "documentation":"

    Get a milestone for an existing workload.

    " }, + "GetProfile":{ + "name":"GetProfile", + "http":{ + "method":"GET", + "requestUri":"/profiles/{ProfileArn}" + }, + "input":{"shape":"GetProfileInput"}, + "output":{"shape":"GetProfileOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get profile information.

    " + }, + "GetProfileTemplate":{ + "name":"GetProfileTemplate", + "http":{ + "method":"GET", + "requestUri":"/profileTemplate" + }, + "input":{"shape":"GetProfileTemplateInput"}, + "output":{"shape":"GetProfileTemplateOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get profile template.

    " + }, "GetWorkload":{ "name":"GetWorkload", "http":{ @@ -533,6 +672,55 @@ ], "documentation":"

    List lens notifications.

    " }, + "ListProfileNotifications":{ + "name":"ListProfileNotifications", + "http":{ + "method":"GET", + "requestUri":"/profileNotifications/" + }, + "input":{"shape":"ListProfileNotificationsInput"}, + "output":{"shape":"ListProfileNotificationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List profile notifications.

    " + }, + "ListProfileShares":{ + "name":"ListProfileShares", + "http":{ + "method":"GET", + "requestUri":"/profiles/{ProfileArn}/shares" + }, + "input":{"shape":"ListProfileSharesInput"}, + "output":{"shape":"ListProfileSharesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List profile shares.

    " + }, + "ListProfiles":{ + "name":"ListProfiles", + "http":{ + "method":"GET", + "requestUri":"/profileSummaries" + }, + "input":{"shape":"ListProfilesInput"}, + "output":{"shape":"ListProfilesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    List profiles.

    " + }, "ListShareInvitations":{ "name":"ListShareInvitations", "http":{ @@ -561,7 +749,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    List the tags for a resource.

    The WorkloadArn parameter can be either a workload ARN or a custom lens ARN.

    " + "documentation":"

    List the tags for a resource.

    The WorkloadArn parameter can be a workload ARN, a custom lens ARN, or a profile ARN.

    " }, "ListWorkloadShares":{ "name":"ListWorkloadShares", @@ -608,7 +796,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Adds one or more tags to the specified resource.

    The WorkloadArn parameter can be either a workload ARN or a custom lens ARN.

    " + "documentation":"

    Adds one or more tags to the specified resource.

    The WorkloadArn parameter can be a workload ARN, a custom lens ARN, or a profile ARN.

    " }, "UntagResource":{ "name":"UntagResource", @@ -622,7 +810,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Deletes specified tags from a resource.

    The WorkloadArn parameter can be either a workload ARN or a custom lens ARN.

    To specify multiple tags, use separate tagKeys parameters, for example:

    DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2

    " + "documentation":"

    Deletes specified tags from a resource.

    The WorkloadArn parameter can be a workload ARN, a custom lens ARN, or a profile ARN.

    To specify multiple tags, use separate tagKeys parameters, for example:

    DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2

    " }, "UpdateAnswer":{ "name":"UpdateAnswer", @@ -676,6 +864,24 @@ ], "documentation":"

    Update lens review for a particular workload.

    " }, + "UpdateProfile":{ + "name":"UpdateProfile", + "http":{ + "method":"PATCH", + "requestUri":"/profiles/{ProfileArn}" + }, + "input":{"shape":"UpdateProfileInput"}, + "output":{"shape":"UpdateProfileOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Update a profile.

    " + }, "UpdateShareInvitation":{ "name":"UpdateShareInvitation", "http":{ @@ -746,6 +952,23 @@ {"shape":"ThrottlingException"} ], "documentation":"

    Upgrade lens review for a particular workload.

    " + }, + "UpgradeProfileVersion":{ + "name":"UpgradeProfileVersion", + "http":{ + "method":"PUT", + "requestUri":"/workloads/{WorkloadId}/profiles/{ProfileArn}/upgrade" + }, + "input":{"shape":"UpgradeProfileVersionInput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Upgrade a profile.

    " } }, "shapes":{ @@ -851,6 +1074,10 @@ "Reason":{ "shape":"AnswerReason", "documentation":"

    The reason why a choice is non-applicable to a question in your workload.

    " + }, + "QuestionType":{ + "shape":"QuestionType", + "documentation":"

    The type of the question.

    " } }, "documentation":"

    An answer summary of a lens review in a workload.

    " @@ -876,9 +1103,29 @@ }, "documentation":"

    Input to associate lens reviews.

    " }, + "AssociateProfilesInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ProfileArns" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ProfileArns":{ + "shape":"ProfileArns", + "documentation":"

    The list of profile ARNs to associate with the workload.

    " + } + } + }, "AwsAccountId":{ "type":"string", "documentation":"

    An Amazon Web Services account ID.

    ", + "max":12, + "min":12, "pattern":"[0-9]{12}" }, "AwsRegion":{ @@ -1196,7 +1443,9 @@ }, "ClientRequestToken":{ "type":"string", - "documentation":"

    A unique case-sensitive string used to ensure that this request is idempotent (executes only once).

    You should not reuse the same token for other requests. If you retry a request with the same client request token and the same parameters after the original request has completed successfully, the result of the original request is returned.

    This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, you must provide this token or the request will fail.

    " + "documentation":"

    A unique case-sensitive string used to ensure that this request is idempotent (executes only once).

    You should not reuse the same token for other requests. If you retry a request with the same client request token and the same parameters after the original request has completed successfully, the result of the original request is returned.

    This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, you must provide this token or the request will fail.

    ", + "max":2048, + "min":1 }, "ConflictException":{ "type":"structure", @@ -1341,6 +1590,81 @@ }, "documentation":"

    Output of a create milestone call.

    " }, + "CreateProfileInput":{ + "type":"structure", + "required":[ + "ProfileName", + "ProfileDescription", + "ProfileQuestions", + "ClientRequestToken" + ], + "members":{ + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

    Name of the profile.

    " + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

    The profile description.

    " + }, + "ProfileQuestions":{ + "shape":"ProfileQuestionUpdates", + "documentation":"

    The profile questions.

    " + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags assigned to the profile.

    " + } + } + }, + "CreateProfileOutput":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    Version of the profile.

    " + } + } + }, + "CreateProfileShareInput":{ + "type":"structure", + "required":[ + "ProfileArn", + "SharedWith", + "ClientRequestToken" + ], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "SharedWith":{"shape":"SharedWith"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "CreateProfileShareOutput":{ + "type":"structure", + "members":{ + "ShareId":{"shape":"ShareId"}, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " + } + } + }, "CreateWorkloadInput":{ "type":"structure", "required":[ @@ -1379,6 +1703,10 @@ "Applications":{ "shape":"WorkloadApplications", "documentation":"

    List of AppRegistry application ARNs associated to the workload.

    " + }, + "ProfileArns":{ + "shape":"WorkloadProfileArns", + "documentation":"

    The list of profile ARNs associated with the workload.

    " } }, "documentation":"

    Input for workload creation.

    " @@ -1482,6 +1810,54 @@ } } }, + "DeleteProfileInput":{ + "type":"structure", + "required":[ + "ProfileArn", + "ClientRequestToken" + ], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"ClientRequestToken" + } + } + }, + "DeleteProfileShareInput":{ + "type":"structure", + "required":[ + "ShareId", + "ProfileArn", + "ClientRequestToken" + ], + "members":{ + "ShareId":{ + "shape":"ShareId", + "location":"uri", + "locationName":"ShareId" + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"ClientRequestToken" + } + } + }, "DeleteWorkloadInput":{ "type":"structure", "required":[ @@ -1554,15 +1930,33 @@ }, "documentation":"

    Input to disassociate lens reviews.

    " }, - "DiscoveryIntegrationStatus":{ - "type":"string", - "enum":[ - "ENABLED", - "DISABLED" - ] - }, - "DisplayText":{ - "type":"string", + "DisassociateProfilesInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ProfileArns" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ProfileArns":{ + "shape":"ProfileArns", + "documentation":"

    The list of profile ARNs to disassociate from the workload.

    " + } + } + }, + "DiscoveryIntegrationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "DisplayText":{ + "type":"string", "max":64, "min":1 }, @@ -1867,6 +2261,47 @@ }, "documentation":"

    Output of a get milestone call.

    " }, + "GetProfileInput":{ + "type":"structure", + "required":["ProfileArn"], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    The profile version.

    ", + "location":"querystring", + "locationName":"ProfileVersion" + } + } + }, + "GetProfileOutput":{ + "type":"structure", + "members":{ + "Profile":{ + "shape":"Profile", + "documentation":"

    The profile.

    " + } + } + }, + "GetProfileTemplateInput":{ + "type":"structure", + "members":{ + } + }, + "GetProfileTemplateOutput":{ + "type":"structure", + "members":{ + "ProfileTemplate":{ + "shape":"ProfileTemplate", + "documentation":"

    The profile template.

    " + } + } + }, "GetWorkloadInput":{ "type":"structure", "required":["WorkloadId"], @@ -2082,7 +2517,12 @@ "UpdatedAt":{"shape":"Timestamp"}, "Notes":{"shape":"Notes"}, "RiskCounts":{"shape":"RiskCounts"}, - "NextToken":{"shape":"NextToken"} + "NextToken":{"shape":"NextToken"}, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

    The profiles associated with the workload.

    " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

    A lens review of a question.

    " }, @@ -2121,7 +2561,12 @@ "documentation":"

    The status of the lens.

    " }, "UpdatedAt":{"shape":"Timestamp"}, - "RiskCounts":{"shape":"RiskCounts"} + "RiskCounts":{"shape":"RiskCounts"}, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

    The profiles associated with the workload.

    " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

    A lens review summary of a workload.

    " }, @@ -2268,6 +2713,12 @@ "documentation":"

    The maximum number of results to return for this request.

    ", "location":"querystring", "locationName":"MaxResults" + }, + "QuestionPriority":{ + "shape":"QuestionPriority", + "documentation":"

    The priority of the question.

    ", + "location":"querystring", + "locationName":"QuestionPriority" } }, "documentation":"

    Input to list answers.

    " @@ -2401,6 +2852,12 @@ "documentation":"

    The maximum number of results to return for this request.

    ", "location":"querystring", "locationName":"MaxResults" + }, + "QuestionPriority":{ + "shape":"QuestionPriority", + "documentation":"

    The priority of the question.

    ", + "location":"querystring", + "locationName":"QuestionPriority" } }, "documentation":"

    Input to list lens review improvements.

    " @@ -2595,6 +3052,122 @@ "NextToken":{"shape":"NextToken"} } }, + "ListProfileNotificationsInput":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"querystring", + "locationName":"WorkloadId" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListProfileNotificationsOutput":{ + "type":"structure", + "members":{ + "NotificationSummaries":{ + "shape":"ProfileNotificationSummaries", + "documentation":"

    Notification summaries.

    " + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListProfileSharesInput":{ + "type":"structure", + "required":["ProfileArn"], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "SharedWithPrefix":{ + "shape":"SharedWithPrefix", + "documentation":"

    The Amazon Web Services account ID, IAM role, organization ID, or organizational unit (OU) ID with which the profile is shared.

    ", + "location":"querystring", + "locationName":"SharedWithPrefix" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListProfileSharesMaxResults", + "documentation":"

    The maximum number of results to return for this request.

    ", + "location":"querystring", + "locationName":"MaxResults" + }, + "Status":{ + "shape":"ShareStatus", + "location":"querystring", + "locationName":"Status" + } + } + }, + "ListProfileSharesMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "ListProfileSharesOutput":{ + "type":"structure", + "members":{ + "ProfileShareSummaries":{ + "shape":"ProfileShareSummaries", + "documentation":"

    Profile share summaries.

    " + }, + "NextToken":{"shape":"NextToken"} + } + }, + "ListProfilesInput":{ + "type":"structure", + "members":{ + "ProfileNamePrefix":{ + "shape":"ProfileNamePrefix", + "documentation":"

    Prefix for profile name.

    ", + "location":"querystring", + "locationName":"ProfileNamePrefix" + }, + "ProfileOwnerType":{ + "shape":"ProfileOwnerType", + "documentation":"

    Profile owner type.

    ", + "location":"querystring", + "locationName":"ProfileOwnerType" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListProfilesOutput":{ + "type":"structure", + "members":{ + "ProfileSummaries":{ + "shape":"ProfileSummaries", + "documentation":"

    Profile summaries.

    " + }, + "NextToken":{"shape":"NextToken"} + } + }, "ListShareInvitationsInput":{ "type":"structure", "members":{ @@ -2625,6 +3198,12 @@ "documentation":"

    The maximum number of results to return for this request.

    ", "location":"querystring", "locationName":"MaxResults" + }, + "ProfileNamePrefix":{ + "shape":"ProfileNamePrefix", + "documentation":"

    Profile name prefix.

    ", + "location":"querystring", + "locationName":"ProfileNamePrefix" } }, "documentation":"

    Input for List Share Invitations

    " @@ -2744,6 +3323,10 @@ "max":50, "min":1 }, + "MaxSelectedProfileChoices":{ + "type":"integer", + "min":0 + }, "MetricType":{ "type":"string", "enum":["WORKLOAD"] @@ -2785,6 +3368,10 @@ }, "documentation":"

    A milestone summary return object.

    " }, + "MinSelectedProfileChoices":{ + "type":"integer", + "min":0 + }, "NextToken":{ "type":"string", "documentation":"

    The token to use to retrieve the next set of results.

    " @@ -2828,7 +3415,7 @@ }, "PermissionType":{ "type":"string", - "documentation":"

    Permission granted on a workload share.

    ", + "documentation":"

    Permission granted on a share request.

    ", "enum":[ "READONLY", "CONTRIBUTOR" @@ -2899,10 +3486,280 @@ "PillarId":{"shape":"PillarId"}, "PillarName":{"shape":"PillarName"}, "Notes":{"shape":"Notes"}, - "RiskCounts":{"shape":"RiskCounts"} + "RiskCounts":{"shape":"RiskCounts"}, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

    A pillar review summary of a lens review.

    " }, + "Profile":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    The profile version.

    " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

    The profile name.

    " + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

    The profile description.

    " + }, + "ProfileQuestions":{ + "shape":"ProfileQuestions", + "documentation":"

    Profile questions.

    " + }, + "Owner":{"shape":"AwsAccountId"}, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"}, + "ShareInvitationId":{ + "shape":"ShareInvitationId", + "documentation":"

    The ID assigned to the share invitation.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags assigned to the profile.

    " + } + }, + "documentation":"

    A profile.

    " + }, + "ProfileArn":{ + "type":"string", + "max":2084, + "pattern":"arn:aws[-a-z]*:wellarchitected:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:profile/[a-z0-9]+" + }, + "ProfileArns":{ + "type":"list", + "member":{"shape":"ProfileArn"}, + "min":1 + }, + "ProfileChoice":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "ChoiceTitle":{"shape":"ChoiceTitle"}, + "ChoiceDescription":{"shape":"ChoiceDescription"} + }, + "documentation":"

    The profile choice.

    " + }, + "ProfileDescription":{ + "type":"string", + "max":100, + "min":3, + "pattern":"^[A-Za-z0-9-_.,:/()@!&?#+'’\\s]+$" + }, + "ProfileName":{ + "type":"string", + "max":100, + "min":3, + "pattern":"^[A-Za-z0-9-_.,:/()@!&?#+'’\\s]+$" + }, + "ProfileNamePrefix":{ + "type":"string", + "max":100, + "pattern":"^[A-Za-z0-9-_.,:/()@!&?#+'’\\s]+$" + }, + "ProfileNotificationSummaries":{ + "type":"list", + "member":{"shape":"ProfileNotificationSummary"} + }, + "ProfileNotificationSummary":{ + "type":"structure", + "members":{ + "CurrentProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    The current profile version.

    " + }, + "LatestProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    The latest profile version.

    " + }, + "Type":{ + "shape":"ProfileNotificationType", + "documentation":"

    Type of notification.

    " + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

    The profile name.

    " + }, + "WorkloadId":{"shape":"WorkloadId"}, + "WorkloadName":{"shape":"WorkloadName"} + }, + "documentation":"

    The profile notification summary.

    " + }, + "ProfileNotificationType":{ + "type":"string", + "enum":[ + "PROFILE_ANSWERS_UPDATED", + "PROFILE_DELETED" + ] + }, + "ProfileOwnerType":{ + "type":"string", + "enum":[ + "SELF", + "SHARED" + ] + }, + "ProfileQuestion":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "QuestionDescription":{"shape":"QuestionDescription"}, + "QuestionChoices":{ + "shape":"ProfileQuestionChoices", + "documentation":"

    The question choices.

    " + }, + "SelectedChoiceIds":{ + "shape":"SelectedChoiceIds", + "documentation":"

    The selected choices.

    " + }, + "MinSelectedChoices":{ + "shape":"MinSelectedProfileChoices", + "documentation":"

    The minimum number of selected choices.

    " + }, + "MaxSelectedChoices":{ + "shape":"MaxSelectedProfileChoices", + "documentation":"

    The maximum number of selected choices.

    " + } + }, + "documentation":"

    A profile question.

    " + }, + "ProfileQuestionChoices":{ + "type":"list", + "member":{"shape":"ProfileChoice"} + }, + "ProfileQuestionUpdate":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "SelectedChoiceIds":{ + "shape":"SelectedProfileChoiceIds", + "documentation":"

    The selected choices.

    " + } + }, + "documentation":"

    An update to a profile question.

    " + }, + "ProfileQuestionUpdates":{ + "type":"list", + "member":{"shape":"ProfileQuestionUpdate"} + }, + "ProfileQuestions":{ + "type":"list", + "member":{"shape":"ProfileQuestion"} + }, + "ProfileShareSummaries":{ + "type":"list", + "member":{"shape":"ProfileShareSummary"} + }, + "ProfileShareSummary":{ + "type":"structure", + "members":{ + "ShareId":{"shape":"ShareId"}, + "SharedWith":{"shape":"SharedWith"}, + "Status":{"shape":"ShareStatus"}, + "StatusMessage":{ + "shape":"StatusMessage", + "documentation":"

    Profile share invitation status message.

    " + } + }, + "documentation":"

    Summary of a profile share.

    " + }, + "ProfileSummaries":{ + "type":"list", + "member":{"shape":"ProfileSummary"} + }, + "ProfileSummary":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    The profile version.

    " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

    The profile name.

    " + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

    The profile description.

    " + }, + "Owner":{"shape":"AwsAccountId"}, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"} + }, + "documentation":"

    Summary of a profile.

    " + }, + "ProfileTemplate":{ + "type":"structure", + "members":{ + "TemplateName":{ + "shape":"ProfileName", + "documentation":"

    The name of the profile template.

    " + }, + "TemplateQuestions":{ + "shape":"TemplateQuestions", + "documentation":"

    Profile template questions.

    " + }, + "CreatedAt":{"shape":"Timestamp"}, + "UpdatedAt":{"shape":"Timestamp"} + }, + "documentation":"

    The profile template.

    " + }, + "ProfileTemplateChoice":{ + "type":"structure", + "members":{ + "ChoiceId":{"shape":"ChoiceId"}, + "ChoiceTitle":{"shape":"ChoiceTitle"}, + "ChoiceDescription":{"shape":"ChoiceDescription"} + }, + "documentation":"

    A profile template choice.

    " + }, + "ProfileTemplateQuestion":{ + "type":"structure", + "members":{ + "QuestionId":{"shape":"QuestionId"}, + "QuestionTitle":{"shape":"QuestionTitle"}, + "QuestionDescription":{"shape":"QuestionDescription"}, + "QuestionChoices":{ + "shape":"ProfileTemplateQuestionChoices", + "documentation":"

    The question choices.

    " + }, + "MinSelectedChoices":{ + "shape":"MinSelectedProfileChoices", + "documentation":"

    The minimum number of choices selected.

    " + }, + "MaxSelectedChoices":{ + "shape":"MaxSelectedProfileChoices", + "documentation":"

    The maximum number of choices selected.

    " + } + }, + "documentation":"

    A profile template question.

    " + }, + "ProfileTemplateQuestionChoices":{ + "type":"list", + "member":{"shape":"ProfileTemplateChoice"} + }, + "ProfileVersion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[A-Za-z0-9-]+$" + }, "QuestionDescription":{ "type":"string", "documentation":"

    The description of the question.

    ", @@ -2947,12 +3804,26 @@ "type":"list", "member":{"shape":"QuestionMetric"} }, + "QuestionPriority":{ + "type":"string", + "enum":[ + "PRIORITIZED", + "NONE" + ] + }, "QuestionTitle":{ "type":"string", "documentation":"

    The title of the question.

    ", "max":512, "min":1 }, + "QuestionType":{ + "type":"string", + "enum":[ + "PRIORITIZED", + "NON_PRIORITIZED" + ] + }, "QuotaCode":{ "type":"string", "documentation":"

    Service Quotas requirement to identify originating quota.

    " @@ -2997,11 +3868,19 @@ "value":{"shape":"Count"}, "documentation":"

    A map from risk names to the count of how many questions have that rating.

    " }, + "SelectedChoiceIds":{ + "type":"list", + "member":{"shape":"ChoiceId"} + }, "SelectedChoices":{ "type":"list", "member":{"shape":"ChoiceId"}, "documentation":"

    List of selected choice IDs in a question answer.

    The values entered replace the previously selected choices.

    " }, + "SelectedProfileChoiceIds":{ + "type":"list", + "member":{"shape":"ChoiceId"} + }, "ServiceCode":{ "type":"string", "documentation":"

    Service Quotas requirement to identify originating service.

    " @@ -3026,7 +3905,7 @@ }, "ShareId":{ "type":"string", - "documentation":"

    The ID associated with the workload share.

    ", + "documentation":"

    The ID associated with the share.

    ", "pattern":"[0-9a-f]{32}" }, "ShareInvitation":{ @@ -3045,6 +3924,10 @@ "LensArn":{ "shape":"LensArn", "documentation":"

    The ARN for the lens.

    " + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " } }, "documentation":"

    The share invitation.

    " @@ -3085,6 +3968,14 @@ "LensArn":{ "shape":"LensArn", "documentation":"

    The ARN for the lens.

    " + }, + "ProfileName":{ + "shape":"ProfileName", + "documentation":"

    The profile name.

    " + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " } }, "documentation":"

    A share invitation summary return object.

    " @@ -3093,12 +3984,13 @@ "type":"string", "enum":[ "WORKLOAD", - "LENS" + "LENS", + "PROFILE" ] }, "ShareStatus":{ "type":"string", - "documentation":"

    The status of a workload share.

    ", + "documentation":"

    The status of the share request.

    ", "enum":[ "ACCEPTED", "REJECTED", @@ -3112,7 +4004,7 @@ }, "SharedWith":{ "type":"string", - "documentation":"

    The Amazon Web Services account ID, IAM role, organization ID, or organizational unit (OU) ID with which the workload is shared.

    ", + "documentation":"

    The Amazon Web Services account ID, IAM role, organization ID, or organizational unit (OU) ID with which the workload, lens, or profile is shared.

    ", "max":2048, "min":12 }, @@ -3171,6 +4063,10 @@ "max":256, "min":0 }, + "TemplateQuestions":{ + "type":"list", + "member":{"shape":"ProfileTemplateQuestion"} + }, "ThrottlingException":{ "type":"structure", "required":["Message"], @@ -3312,6 +4208,35 @@ }, "documentation":"

    Output of a update lens review call.

    " }, + "UpdateProfileInput":{ + "type":"structure", + "required":["ProfileArn"], + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "ProfileDescription":{ + "shape":"ProfileDescription", + "documentation":"

    The profile description.

    " + }, + "ProfileQuestions":{ + "shape":"ProfileQuestionUpdates", + "documentation":"

    Profile questions.

    " + } + } + }, + "UpdateProfileOutput":{ + "type":"structure", + "members":{ + "Profile":{ + "shape":"Profile", + "documentation":"

    The profile.

    " + } + } + }, "UpdateShareInvitationInput":{ "type":"structure", "required":[ @@ -3434,6 +4359,31 @@ "ClientRequestToken":{"shape":"ClientRequestToken"} } }, + "UpgradeProfileVersionInput":{ + "type":"structure", + "required":[ + "WorkloadId", + "ProfileArn" + ], + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "location":"uri", + "locationName":"WorkloadId" + }, + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    ", + "location":"uri", + "locationName":"ProfileArn" + }, + "MilestoneName":{"shape":"MilestoneName"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, "Urls":{ "type":"list", "member":{"shape":"ChoiceContent"} @@ -3533,7 +4483,12 @@ "Applications":{ "shape":"WorkloadApplications", "documentation":"

    List of AppRegistry application ARNs associated to the workload.

    " - } + }, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

    Profile associated with a workload.

    " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

    A workload return object.

    " }, @@ -3595,6 +4550,8 @@ "WorkloadId":{ "type":"string", "documentation":"

    The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

    ", + "max":32, + "min":32, "pattern":"[0-9a-f]{32}" }, "WorkloadImprovementStatus":{ @@ -3650,6 +4607,30 @@ "member":{"shape":"PillarId"}, "documentation":"

    The priorities of the pillars, which are used to order items in the improvement plan. Each pillar is represented by its PillarReviewSummary$PillarId.

    " }, + "WorkloadProfile":{ + "type":"structure", + "members":{ + "ProfileArn":{ + "shape":"ProfileArn", + "documentation":"

    The profile ARN.

    " + }, + "ProfileVersion":{ + "shape":"ProfileVersion", + "documentation":"

    The profile version.

    " + } + }, + "documentation":"

    The profile associated with a workload.

    " + }, + "WorkloadProfileArns":{ + "type":"list", + "member":{"shape":"ProfileArn"}, + "max":1 + }, + "WorkloadProfiles":{ + "type":"list", + "member":{"shape":"WorkloadProfile"}, + "max":1 + }, "WorkloadResourceDefinition":{ "type":"list", "member":{"shape":"DefinitionType"} @@ -3707,7 +4688,12 @@ "UpdatedAt":{"shape":"Timestamp"}, "Lenses":{"shape":"WorkloadLenses"}, "RiskCounts":{"shape":"RiskCounts"}, - "ImprovementStatus":{"shape":"WorkloadImprovementStatus"} + "ImprovementStatus":{"shape":"WorkloadImprovementStatus"}, + "Profiles":{ + "shape":"WorkloadProfiles", + "documentation":"

    Profile associated with a workload.

    " + }, + "PrioritizedRiskCounts":{"shape":"RiskCounts"} }, "documentation":"

    A workload summary return object.

    " } diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index 5b89c0ced3c5..abc0ef106240 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 8d3eeaaacf1d..a601f7ab6b82 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index 428f7e044989..e54fe002c7f8 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index b3b8ff192dfb..8328b77de8ae 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index f72e05515ea3..f063f5999f8a 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index ffc87249bfaf..015ce4d8b223 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index 155e239bd57c..0f1c5f1a052d 
100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/xray/pom.xml b/services/xray/pom.xml index af76cc0230fb..78dbc3436e47 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 7b7f8511a9ec..d44c9d4581b6 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 290cbdcbf0be..1a3b5c626589 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java index 029a19447047..8ba47dee79cf 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumInHeaderTest.java @@ -17,7 +17,6 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; -import static software.amazon.awssdk.core.HttpChecksumConstant.HTTP_CHECKSUM_VALUE; import io.reactivex.Flowable; import java.io.IOException; @@ -28,7 +27,6 @@ import java.util.Set; import 
java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; -import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentCaptor; @@ -38,9 +36,6 @@ import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder; import software.amazon.awssdk.core.checksums.Algorithm; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; import software.amazon.awssdk.http.HttpExecuteResponse; @@ -103,11 +98,6 @@ public void setup() throws IOException { }); } - @After - public void clear() { - CaptureChecksumValueInterceptor.reset(); - } - @Test public void sync_json_nonStreaming_unsignedPayload_with_Sha1_in_header() { // jsonClient.flexibleCheckSumOperationWithShaChecksum(r -> r.stringMember("Hello world")); @@ -118,9 +108,6 @@ public void sync_json_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getSyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("M68rRwFal7o7B3KEMt3m0w39TaA="); // Assertion to make sure signer was not executed assertThat(getSyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("M68rRwFal7o7B3KEMt3m0w39TaA="); - } @Test @@ -133,9 +120,6 @@ public void aync_json_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getAsyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("M68rRwFal7o7B3KEMt3m0w39TaA="); // Assertion to make sure signer was not executed assertThat(getAsyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - 
assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("M68rRwFal7o7B3KEMt3m0w39TaA="); - - } @Test @@ -148,9 +132,6 @@ public void sync_xml_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getSyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); // Assertion to make sure signer was not executed assertThat(getSyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); - } @Test @@ -169,9 +150,6 @@ public void sync_xml_nonStreaming_unsignedEmptyPayload_with_Sha1_in_header() { // Assertion to make sure signer was not executed assertThat(getSyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isNull(); - } @Test @@ -185,8 +163,6 @@ public void aync_xml_nonStreaming_unsignedPayload_with_Sha1_in_header() { assertThat(getAsyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).hasValue("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); // Assertion to make sure signer was not executed assertThat(getAsyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isEqualTo("FB/utBbwFLbIIt5ul3Ojuy5dKgU="); - } @Test @@ -206,8 +182,6 @@ public void aync_xml_nonStreaming_unsignedEmptyPayload_with_Sha1_in_header() { assertThat(getAsyncRequest().firstMatchingHeader("x-amz-checksum-sha1")).isNotPresent(); // Assertion to make sure signer was not executed assertThat(getAsyncRequest().firstMatchingHeader("x-amz-content-sha256")).isNotPresent(); - assertThat(CaptureChecksumValueInterceptor.interceptorComputedChecksum).isNull(); - } private SdkHttpRequest getSyncRequest() { @@ -224,32 +198,15 @@ private SdkHttpRequest getAsyncRequest() { private & AwsClientBuilder> T initializeSync(T 
syncClientBuilder) { - return initialize(syncClientBuilder.httpClient(httpClient) - .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureChecksumValueInterceptor()))); + return initialize(syncClientBuilder.httpClient(httpClient)); } private & AwsClientBuilder> T initializeAsync(T asyncClientBuilder) { - return initialize(asyncClientBuilder.httpClient(httpAsyncClient) - .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureChecksumValueInterceptor()))); + return initialize(asyncClientBuilder.httpClient(httpAsyncClient)); } private > T initialize(T clientBuilder) { return clientBuilder.credentialsProvider(AnonymousCredentialsProvider.create()) .region(Region.US_WEST_2); } - - - private static class CaptureChecksumValueInterceptor implements ExecutionInterceptor { - private static String interceptorComputedChecksum; - - private static void reset() { - interceptorComputedChecksum = null; - } - - @Override - public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { - interceptorComputedChecksum = executionAttributes.getAttribute(HTTP_CHECKSUM_VALUE); - - } - } } \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/MoveQueryParamsToBodyTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/MoveQueryParamsToBodyTest.java deleted file mode 100644 index 7f2b32fa668c..000000000000 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolquery/MoveQueryParamsToBodyTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.protocolquery; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.Optional; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.http.ContentStreamProvider; -import software.amazon.awssdk.http.ExecutableHttpRequest; -import software.amazon.awssdk.http.HttpExecuteRequest; -import software.amazon.awssdk.http.SdkHttpClient; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.utils.IoUtils; - -public class MoveQueryParamsToBodyTest { - private static final AwsCredentialsProvider CREDENTIALS = 
StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); - - private SdkHttpClient mockHttpClient; - - private ProtocolQueryClient client; - - @BeforeEach - public void setup() throws IOException { - mockHttpClient = mock(SdkHttpClient.class); - ExecutableHttpRequest mockRequest = mock(ExecutableHttpRequest.class); - when(mockRequest.call()).thenThrow(new IOException("IO error!")); - when(mockHttpClient.prepareRequest(any())).thenReturn(mockRequest); - } - - @AfterEach - public void teardown() { - if (client != null) { - client.close(); - } - client = null; - } - - @Test - public void customInterceptor_additionalQueryParamsAdded_paramsAlsoMovedToBody() throws IOException { - client = ProtocolQueryClient.builder() - .overrideConfiguration(o -> o.addExecutionInterceptor(new AdditionalQueryParamInterceptor())) - .region(Region.US_WEST_2) - .credentialsProvider(CREDENTIALS) - .httpClient(mockHttpClient) - .build(); - - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpExecuteRequest.class); - - assertThatThrownBy(() -> client.membersInQueryParams(r -> r.stringQueryParam("hello"))) - .isInstanceOf(SdkClientException.class) - .hasMessageContaining("IO"); - - verify(mockHttpClient, atLeast(1)).prepareRequest(requestCaptor.capture()); - - ContentStreamProvider requestContent = requestCaptor.getValue().contentStreamProvider().get(); - - String contentString = IoUtils.toUtf8String(requestContent.newStream()); - - assertThat(contentString).contains("CustomParamName=CustomParamValue"); - } - - private static class AdditionalQueryParamInterceptor implements ExecutionInterceptor { - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - return context.httpRequest().toBuilder() - .putRawQueryParameter("CustomParamName", "CustomParamValue") - .build(); - } - } -} diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 775e4e820277..23c4bfc9aac5 
100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 636cefc30ac1..5ee03540a741 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index f56c9117620a..a55293bdb28e 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 641392d5686b..b35529985325 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java new file mode 100644 index 000000000000..1a17986dc30f --- /dev/null +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/connection/SyncClientConnectionInterruptionTest.java @@ -0,0 +1,205 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.protocol.tests.connection; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.exception.AbortedException; +import software.amazon.awssdk.core.exception.ApiCallAttemptTimeoutException; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; 
+import software.amazon.awssdk.services.protocolrestjson.model.AllTypesResponse; + +/** + * Tests to verify Interruption of Threads while Http Connection is in progress to make sure Resources are released. + */ +class SyncClientConnectionInterruptionTest { + public static final String SAMPLE_BODY = "{\"StringMember" + + "\":\"resultString\"}"; + private final WireMockServer mockServer = new WireMockServer(new WireMockConfiguration() + .bindAddress("localhost").dynamicPort()); + + private static final ExecutorService executorService = Executors.newCachedThreadPool(); + + @BeforeEach + public void setup() { + mockServer.start(); + stubPostRequest(".*", aResponse(), "{}"); + } + + @AfterAll + public static void cleanUp(){ + executorService.shutdownNow(); + } + + @Test + void connectionPoolsGetsReusedWhenInterruptedWith_1_MaxConnection() throws Exception { + Integer responseDelay = 1500; + + String urlRegex = "/2016-03-11/allTypes"; + stubPostRequest(urlRegex, aResponse().withFixedDelay(responseDelay), SAMPLE_BODY); + SdkHttpClient httpClient = ApacheHttpClient.builder().maxConnections(1).build(); + ProtocolRestJsonClient client = getClient(httpClient, Duration.ofMillis(2L * responseDelay)).build(); + + Future toBeInterruptedFuture = executorService.submit(() -> client.allTypes()); + unInterruptedSleep(responseDelay - responseDelay / 5); + toBeInterruptedFuture.cancel(true); + // Make sure thread start the Http connections + unInterruptedSleep(50); + AllTypesResponse allTypesResponse = client.allTypes(); + assertThat(allTypesResponse.stringMember()).isEqualTo("resultString"); + executorService.shutdownNow(); + } + + @Test + void interruptionWhenWaitingForLease_AbortsImmediately() throws InterruptedException { + Integer responseDelay = 50000; + ExceptionInThreadRun exceptionInThreadRun = new ExceptionInThreadRun(); + AtomicLong leaseWaitingTime = new AtomicLong(responseDelay); + stubPostRequest("/2016-03-11/allTypes", aResponse().withFixedDelay(responseDelay), 
SAMPLE_BODY); + SdkHttpClient httpClient = ApacheHttpClient.builder().maxConnections(1).build(); + ProtocolRestJsonClient client = getClient(httpClient, Duration.ofMillis(2L * responseDelay)).build(); + executorService.submit(() -> client.allTypes()); + // 1 Sec sleep to make sure Thread 1 is picked for executing Http connection + unInterruptedSleep(1000); + Thread leaseWaitingThread = new Thread(() -> { + + try { + client.allTypes(l -> l.overrideConfiguration( + b -> b + .addMetricPublisher(new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) { + Optional> apiCallDuration = + metricCollection.stream().filter(o -> "ApiCallDuration".equals(o.metric().name())).findAny(); + leaseWaitingTime.set(Duration.parse(apiCallDuration.get().value().toString()).toMillis()); + } + + @Override + public void close() { + } + }) + )); + + } catch (Exception exception) { + exceptionInThreadRun.setException(exception); + + } + }); + + leaseWaitingThread.start(); + // 1 sec sleep to make sure Http connection execution is initialized for Thread 2 , in this case it will wait for lease + // and immediately terminate on interrupt + unInterruptedSleep(1000); + leaseWaitingThread.interrupt(); + leaseWaitingThread.join(); + assertThat(leaseWaitingTime.get()).isNotEqualTo(responseDelay.longValue()); + assertThat(leaseWaitingTime.get()).isLessThan(responseDelay.longValue()); + assertThat(exceptionInThreadRun.getException()).isInstanceOf(AbortedException.class); + client.close(); + } + + /** + * Service Latency is set to high value say X. + * Api timeout value id set to 1/3 of X. + * And we interrupt the thread at 90% of X. + * In this case since the ApiTimeOut first happened we should get ApiTimeOut Exception and not the interrupt. 
+ */ + @Test + void interruptionDueToApiTimeOut_followed_byInterruptCausesOnlyTimeOutException() throws InterruptedException { + SdkHttpClient httpClient = ApacheHttpClient.create(); + Integer responseDelay = 3000; + stubPostRequest("/2016-03-11/allTypes", aResponse().withFixedDelay(responseDelay), SAMPLE_BODY); + ExceptionInThreadRun exception = new ExceptionInThreadRun(); + ProtocolRestJsonClient client = + getClient(httpClient, Duration.ofMillis(10)).overrideConfiguration(o -> o.retryPolicy(RetryPolicy.none())).build(); + unInterruptedSleep(100); + // We need to creat a separate thread to interrupt it externally. + Thread leaseWaitingThread = new Thread(() -> { + try { + client.allTypes(l -> l.overrideConfiguration(b -> b.apiCallAttemptTimeout(Duration.ofMillis(responseDelay / 3)))); + } catch (Exception e) { + exception.setException(e); + } + }); + leaseWaitingThread.start(); + unInterruptedSleep(responseDelay - responseDelay / 10); + leaseWaitingThread.interrupt(); + leaseWaitingThread.join(); + assertThat(exception.getException()).isInstanceOf(ApiCallAttemptTimeoutException.class); + client.close(); + } + + private class ExceptionInThreadRun { + private Exception exception; + public Exception getException() { + return exception; + } + public void setException(Exception exception) { + this.exception = exception; + } + } + + static void unInterruptedSleep(long millis){ + try { + Thread.sleep(millis); + } catch (InterruptedException e) { + throw new IllegalStateException("This test sleep is not be interrupted"); + } + } + + private void stubPostRequest(String urlRegex, ResponseDefinitionBuilder LONG_DELAY, String body) { + mockServer.stubFor(post(urlMatching(urlRegex)) + .willReturn(LONG_DELAY + .withStatus(200) + .withBody(body))); + } + private ProtocolRestJsonClientBuilder getClient(SdkHttpClient httpClient, Duration timeOutDuration) { + return ProtocolRestJsonClient.builder() + .credentialsProvider( + 
StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .httpClient(httpClient) + .overrideConfiguration(o -> o.apiCallTimeout(timeOutDuration)); + + } +} diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index aa93b61415ab..e14bd8773644 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index 06785acec815..978675fa3f50 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 497f1cc6c555..e21561a9dec3 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index e7afe2513206..66e7ebce5f54 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index eddf600713ff..7d2379dd9f6b 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index f63ac0099683..5738a34fac3c 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index eab70d4404cd..4a66fafb2969 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index c8b2fff66219..1850ec461fb8 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index abb3aad12184..9b14a98b1ce7 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/third-party/pom.xml b/third-party/pom.xml index c48bf217b560..d88aa9351cd6 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index b5037cc43650..95be0294c4d8 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml index 57c3e7ae0d9d..f1178462067c 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.83-SNAPSHOT + 
2.20.90-SNAPSHOT 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 88162e454605..e53afb175589 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.83-SNAPSHOT + 2.20.90-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java index e7fd8c015e1d..192ea7cead9b 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java @@ -117,6 +117,80 @@ public static ByteArrayInputStream toStream(ByteBuffer byteBuffer) { return new ByteArrayInputStream(copyBytesFrom(byteBuffer)); } + /** + * Returns an immutable copy of the given {@code ByteBuffer}. + *

    + * The new buffer's position will be set to the position of the given {@code ByteBuffer}, but the mark if defined will be + * ignored. + *

    + * NOTE: this method intentionally converts direct buffers to non-direct though there is no guarantee this will always + * be the case, if this is required see {@link #toNonDirectBuffer(ByteBuffer)} + * + * @param bb the source {@code ByteBuffer} to copy. + * @return a read only {@code ByteBuffer}. + */ + public static ByteBuffer immutableCopyOf(ByteBuffer bb) { + if (bb == null) { + return null; + } + int sourceBufferPosition = bb.position(); + ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); + readOnlyCopy.rewind(); + ByteBuffer cloned = ByteBuffer.allocate(readOnlyCopy.capacity()) + .put(readOnlyCopy); + cloned.position(sourceBufferPosition); + return cloned.asReadOnlyBuffer(); + } + + /** + * Returns an immutable copy of the remaining bytes of the given {@code ByteBuffer}. + *

    + * NOTE: this method intentionally converts direct buffers to non-direct though there is no guarantee this will always + * be the case, if this is required see {@link #toNonDirectBuffer(ByteBuffer)} + * + * @param bb the source {@code ByteBuffer} to copy. + * @return a read only {@code ByteBuffer}. + */ + public static ByteBuffer immutableCopyOfRemaining(ByteBuffer bb) { + if (bb == null) { + return null; + } + ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); + ByteBuffer cloned = ByteBuffer.allocate(readOnlyCopy.remaining()) + .put(readOnlyCopy); + cloned.flip(); + return cloned.asReadOnlyBuffer(); + } + + /** + * Returns a copy of the given {@code DirectByteBuffer} from its current position as a non-direct {@code HeapByteBuffer} + *

    + * The new buffer's position will be set to the position of the given {@code ByteBuffer}, but the mark if defined will be + * ignored. + * + * @param bb the source {@code ByteBuffer} to copy. + * @return {@code ByteBuffer}. + */ + public static ByteBuffer toNonDirectBuffer(ByteBuffer bb) { + if (bb == null) { + return null; + } + if (!bb.isDirect()) { + throw new IllegalArgumentException("Provided ByteBuffer is already non-direct"); + } + int sourceBufferPosition = bb.position(); + ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); + readOnlyCopy.rewind(); + ByteBuffer cloned = ByteBuffer.allocate(bb.capacity()) + .put(readOnlyCopy); + cloned.rewind(); + cloned.position(sourceBufferPosition); + if (bb.isReadOnly()) { + return cloned.asReadOnlyBuffer(); + } + return cloned; + } + /** * Returns a copy of all the bytes from the given ByteBuffer, * from the beginning to the buffer's limit; or null if the input is null. diff --git a/utils/src/main/java/software/amazon/awssdk/utils/ClassLoaderHelper.java b/utils/src/main/java/software/amazon/awssdk/utils/ClassLoaderHelper.java new file mode 100644 index 000000000000..487ae82df18d --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/ClassLoaderHelper.java @@ -0,0 +1,150 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.utils; + + +import software.amazon.awssdk.annotations.SdkProtectedApi; + +@SdkProtectedApi +public final class ClassLoaderHelper { + + private ClassLoaderHelper() { + } + + private static Class loadClassViaClasses(String fqcn, Class[] classes) { + if (classes == null) { + return null; + } + + for (Class clzz: classes) { + if (clzz == null) { + continue; + } + ClassLoader loader = clzz.getClassLoader(); + if (loader != null) { + try { + return loader.loadClass(fqcn); + } catch (ClassNotFoundException e) { + // move on to try the next class loader + } + } + } + return null; + } + + private static Class loadClassViaContext(String fqcn) { + ClassLoader loader = contextClassLoader(); + try { + return loader == null ? null : loader.loadClass(fqcn); + } catch (ClassNotFoundException e) { + // Ignored. + } + return null; + } + + /** + * Loads the class via the optionally specified classes in the order of + * their specification, and if not found, via the context class loader of + * the current thread, and if not found, from the caller class loader as the + * last resort. + * + * @param fqcn + * fully qualified class name of the target class to be loaded + * @param classes + * class loader providers + * @return the class loaded; never null + * + * @throws ClassNotFoundException + * if failed to load the class + */ + public static Class loadClass(String fqcn, Class... classes) throws ClassNotFoundException { + return loadClass(fqcn, true, classes); + } + + /** + * If classesFirst is false, loads the class via the context class + * loader of the current thread, and if not found, via the class loaders of + * the optionally specified classes in the order of their specification, and + * if not found, from the caller class loader as the + * last resort. + *

    + * If classesFirst is true, loads the class via the optionally + * specified classes in the order of their specification, and if not found, + * via the context class loader of the current thread, and if not found, + * from the caller class loader as the last resort. + * + * @param fqcn + * fully qualified class name of the target class to be loaded + * @param classesFirst + * true if the class loaders of the optionally specified classes + * take precedence over the context class loader of the current + * thread; false if the opposite is true. + * @param classes + * class loader providers + * @return the class loaded; never null + * + * @throws ClassNotFoundException if failed to load the class + */ + public static Class loadClass(String fqcn, boolean classesFirst, + Class... classes) throws ClassNotFoundException { + Class target = null; + if (classesFirst) { + target = loadClassViaClasses(fqcn, classes); + if (target == null) { + target = loadClassViaContext(fqcn); + } + } else { + target = loadClassViaContext(fqcn); + if (target == null) { + target = loadClassViaClasses(fqcn, classes); + } + } + return target == null ? Class.forName(fqcn) : target; + } + + /** + * Attempt to get the current thread's class loader and fallback to the system classloader if null + * @return a {@link ClassLoader} or null if none found + */ + private static ClassLoader contextClassLoader() { + ClassLoader threadClassLoader = Thread.currentThread().getContextClassLoader(); + if (threadClassLoader != null) { + return threadClassLoader; + } + return ClassLoader.getSystemClassLoader(); + } + + /** + * Attempt to get class loader that loads the classes and fallback to the thread context classloader if null. + * + * @param classes the classes + * @return a {@link ClassLoader} or null if none found + */ + public static ClassLoader classLoader(Class... 
classes) { + if (classes != null) { + for (Class clzz : classes) { + ClassLoader classLoader = clzz.getClassLoader(); + + if (classLoader != null) { + return classLoader; + } + } + } + + return contextClassLoader(); + } + +} \ No newline at end of file diff --git a/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java index 5f255d347adc..4e416ea9e3b6 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java @@ -16,9 +16,11 @@ package software.amazon.awssdk.utils; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import java.nio.ByteBuffer; @@ -32,13 +34,11 @@ public class BinaryUtilsTest { public void testHex() { { String hex = BinaryUtils.toHex(new byte[] {0}); - System.out.println(hex); String hex2 = Base16Lower.encodeAsString(new byte[] {0}); assertEquals(hex, hex2); } { String hex = BinaryUtils.toHex(new byte[] {-1}); - System.out.println(hex); String hex2 = Base16Lower.encodeAsString(new byte[] {-1}); assertEquals(hex, hex2); } @@ -169,7 +169,7 @@ public void testCopyRemainingBytesFrom_nullBuffer() { @Test public void testCopyRemainingBytesFrom_noRemainingBytes() { ByteBuffer bb = ByteBuffer.allocate(1); - bb.put(new byte[]{1}); + bb.put(new byte[] {1}); bb.flip(); bb.get(); @@ -180,7 +180,7 @@ public void testCopyRemainingBytesFrom_noRemainingBytes() { @Test public void testCopyRemainingBytesFrom_fullBuffer() { ByteBuffer bb = ByteBuffer.allocate(4); - bb.put(new byte[]{1, 2, 3, 4}); + bb.put(new byte[] 
{1, 2, 3, 4}); bb.flip(); byte[] copy = BinaryUtils.copyRemainingBytesFrom(bb); @@ -191,7 +191,7 @@ public void testCopyRemainingBytesFrom_fullBuffer() { @Test public void testCopyRemainingBytesFrom_partiallyReadBuffer() { ByteBuffer bb = ByteBuffer.allocate(4); - bb.put(new byte[]{1, 2, 3, 4}); + bb.put(new byte[] {1, 2, 3, 4}); bb.flip(); bb.get(); @@ -201,4 +201,137 @@ public void testCopyRemainingBytesFrom_partiallyReadBuffer() { assertThat(bb).isEqualTo(ByteBuffer.wrap(copy)); assertThat(copy).hasSize(2); } + + @Test + public void testImmutableCopyOfByteBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] originalBytesInSource = {1, 2, 3, 4}; + sourceBuffer.put(originalBytesInSource); + sourceBuffer.flip(); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOf(sourceBuffer); + + byte[] bytesInSourceAfterCopy = {-1, -2, -3, -4}; + sourceBuffer.put(bytesInSourceAfterCopy); + sourceBuffer.flip(); + + assertTrue(immutableCopy.isReadOnly()); + byte[] fromImmutableCopy = new byte[originalBytesInSource.length]; + immutableCopy.get(fromImmutableCopy); + assertArrayEquals(originalBytesInSource, fromImmutableCopy); + + assertEquals(0, sourceBuffer.position()); + byte[] fromSource = new byte[bytesInSourceAfterCopy.length]; + sourceBuffer.get(fromSource); + assertArrayEquals(bytesInSourceAfterCopy, fromSource); + } + + @Test + public void testImmutableCopyOfByteBuffer_nullBuffer() { + assertNull(BinaryUtils.immutableCopyOf(null)); + } + + @Test + public void testImmutableCopyOfByteBuffer_partiallyReadBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.position(2); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOf(sourceBuffer); + + assertEquals(sourceBuffer.position(), immutableCopy.position()); + immutableCopy.rewind(); + byte[] fromImmutableCopy = new byte[bytes.length]; + immutableCopy.get(fromImmutableCopy); + assertArrayEquals(bytes, 
fromImmutableCopy); + } + + @Test + public void testImmutableCopyOfRemainingByteBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] originalBytesInSource = {1, 2, 3, 4}; + sourceBuffer.put(originalBytesInSource); + sourceBuffer.flip(); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOfRemaining(sourceBuffer); + + byte[] bytesInSourceAfterCopy = {-1, -2, -3, -4}; + sourceBuffer.put(bytesInSourceAfterCopy); + sourceBuffer.flip(); + + assertTrue(immutableCopy.isReadOnly()); + byte[] fromImmutableCopy = new byte[originalBytesInSource.length]; + immutableCopy.get(fromImmutableCopy); + assertArrayEquals(originalBytesInSource, fromImmutableCopy); + + assertEquals(0, sourceBuffer.position()); + byte[] fromSource = new byte[bytesInSourceAfterCopy.length]; + sourceBuffer.get(fromSource); + assertArrayEquals(bytesInSourceAfterCopy, fromSource); + } + + @Test + public void testImmutableCopyOfByteBufferRemaining_nullBuffer() { + assertNull(BinaryUtils.immutableCopyOfRemaining(null)); + } + + @Test + public void testImmutableCopyOfByteBufferRemaining_partiallyReadBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(4); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.position(2); + + ByteBuffer immutableCopy = BinaryUtils.immutableCopyOfRemaining(sourceBuffer); + + assertEquals(2, immutableCopy.capacity()); + assertEquals(2, immutableCopy.remaining()); + assertEquals(0, immutableCopy.position()); + assertEquals((byte) 3, immutableCopy.get()); + assertEquals((byte) 4, immutableCopy.get()); + } + + @Test + public void testToNonDirectBuffer() { + ByteBuffer bb = ByteBuffer.allocateDirect(4); + byte[] expected = {1, 2, 3, 4}; + bb.put(expected); + bb.flip(); + + ByteBuffer nonDirectBuffer = BinaryUtils.toNonDirectBuffer(bb); + + assertFalse(nonDirectBuffer.isDirect()); + byte[] bytes = new byte[expected.length]; + nonDirectBuffer.get(bytes); + assertArrayEquals(expected, bytes); + } + + @Test + public void 
testToNonDirectBuffer_nullBuffer() { + assertNull(BinaryUtils.toNonDirectBuffer(null)); + } + + @Test + public void testToNonDirectBuffer_partiallyReadBuffer() { + ByteBuffer sourceBuffer = ByteBuffer.allocateDirect(4); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.position(2); + + ByteBuffer nonDirectBuffer = BinaryUtils.toNonDirectBuffer(sourceBuffer); + + assertEquals(sourceBuffer.position(), nonDirectBuffer.position()); + nonDirectBuffer.rewind(); + byte[] fromNonDirectBuffer = new byte[bytes.length]; + nonDirectBuffer.get(fromNonDirectBuffer); + assertArrayEquals(bytes, fromNonDirectBuffer); + } + + @Test + public void testToNonDirectBuffer_nonDirectBuffer() { + ByteBuffer nonDirectBuffer = ByteBuffer.allocate(0); + assertThrows(IllegalArgumentException.class, () -> BinaryUtils.toNonDirectBuffer(nonDirectBuffer)); + } + }